/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */
|
|
|
|
|
|
|
|
/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame, this is
 * only done for syscall tracing, signals or fork/exec et.al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
|
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
|
|
|
#include <asm/segment.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/errno.h>
|
|
|
|
#include <asm/dwarf2.h>
|
|
|
|
#include <asm/calling.h>
|
2005-09-10 03:28:48 +08:00
|
|
|
#include <asm/asm-offsets.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/msr.h>
|
|
|
|
#include <asm/unistd.h>
|
|
|
|
#include <asm/thread_info.h>
|
|
|
|
#include <asm/hw_irq.h>
|
2009-02-14 03:14:01 +08:00
|
|
|
#include <asm/page_types.h>
|
2006-07-03 15:24:45 +08:00
|
|
|
#include <asm/irqflags.h>
|
2008-01-30 20:32:08 +08:00
|
|
|
#include <asm/paravirt.h>
|
2008-06-22 02:17:27 +08:00
|
|
|
#include <asm/ftrace.h>
|
2009-01-13 19:41:35 +08:00
|
|
|
#include <asm/percpu.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-06-24 06:37:04 +08:00
|
|
|
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
/* arch token reported to the audit subsystem: x86-64 ELF, 64-bit, LE */
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
.code64
|
2008-10-07 07:06:12 +08:00
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
2008-05-13 03:20:43 +08:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
|
|
/*
 * With CONFIG_DYNAMIC_FTRACE the mcount call sites are rewritten at
 * runtime (to call ftrace_caller below, presumably — confirm against
 * the ftrace patching code), so the default mcount is a bare return.
 */
ENTRY(mcount)
	retq
END(mcount)
|
|
|
|
|
|
|
|
/*
 * Dynamic-ftrace trampoline.  ftrace_call and ftrace_graph_call are
 * GLOBAL labels so the patching code can rewrite their targets in place.
 */
ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop	/* tracing globally stopped? */
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi		/* saved RIP of the mcount call site
					 * (slot laid out by MCOUNT_SAVE_FRAME) */
	movq 8(%rbp), %rsi		/* parent's return address */
	subq $MCOUNT_INSN_SIZE, %rdi	/* back up to the call instruction */

GLOBAL(ftrace_call)
	call ftrace_stub		/* patched to the active tracer */

	MCOUNT_RESTORE_FRAME

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub			/* patched to ftrace_graph_caller */
#endif

GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)
|
|
|
|
|
|
|
|
#else /* ! CONFIG_DYNAMIC_FTRACE */
|
2008-05-13 03:20:42 +08:00
|
|
|
/*
 * Non-dynamic ftrace: every instrumented function really calls mcount;
 * dispatch through the ftrace_trace_function / graph-tracer pointers,
 * falling through to ftrace_stub (plain ret) when nothing is enabled.
 */
ENTRY(mcount)
	cmpl $0, function_trace_stop	/* tracing globally stopped? */
	jne ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace			/* a function tracer is registered */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi		/* saved RIP of the mcount call site */
	movq 8(%rbp), %rsi		/* parent's return address */
	subq $MCOUNT_INSN_SIZE, %rdi	/* back up to the call instruction */

	call   *ftrace_trace_function	/* tracer(ip, parent_ip) */

	MCOUNT_RESTORE_FRAME

	jmp ftrace_stub
END(mcount)
|
2008-05-13 03:20:43 +08:00
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
2008-10-07 07:06:12 +08:00
|
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
2008-05-13 03:20:42 +08:00
|
|
|
|
2008-12-02 07:20:39 +08:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
/*
 * Function-graph tracer entry hook.  Hands prepare_ftrace_return() the
 * location of the parent return address so it can be hijacked, plus the
 * parent frame pointer for the stack-frame sanity test (see the
 * "function-graph: add stack frame test" change).
 */
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop	/* tracing globally stopped? */
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	/* prepare_ftrace_return(&parent_ret_addr, self_addr, frame_pointer) */
	leaq 8(%rbp), %rdi		/* address of parent's return-address slot */
	movq 0x38(%rsp), %rsi		/* saved RIP of the mcount call site */
	movq (%rbp), %rdx		/* parent frame pointer */
	subq $MCOUNT_INSN_SIZE, %rsi	/* back up to the call instruction */

	call	prepare_ftrace_return

	MCOUNT_RESTORE_FRAME

	retq
END(ftrace_graph_caller)
|
|
|
|
|
2009-02-24 03:57:01 +08:00
|
|
|
/*
 * Executed in place of a traced function's original return address.
 * ftrace_return_to_handler() gives back the real return address; we
 * store it on the stack and 'ret' through it.  rax/rdx are preserved
 * around the call because they carry the traced function's return value.
 */
GLOBAL(return_to_handler)
	subq  $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi			/* frame pointer, verified by the handler */

	call ftrace_return_to_handler

	movq %rax, 16(%rsp)		/* real return address */
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $16, %rsp			/* leave the return address on top */
	retq
|
|
|
|
#endif
|
2008-05-13 03:20:42 +08:00
|
|
|
|
|
|
|
|
2005-04-17 06:25:05 +08:00
|
|
|
#ifndef CONFIG_PREEMPT
/* No kernel preemption: return-to-kernel just restores registers. */
#define retint_kernel retint_restore_args
#endif
|
2006-07-03 15:24:45 +08:00
|
|
|
|
2008-01-30 20:32:08 +08:00
|
|
|
#ifdef CONFIG_PARAVIRT
|
2008-06-25 12:19:28 +08:00
|
|
|
/*
 * Native pv-op: return to 64-bit userspace via SYSRET, switching GS back
 * to the user GS base first.
 */
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
|
2008-01-30 20:32:08 +08:00
|
|
|
#endif /* CONFIG_PARAVIRT */
|
|
|
|
|
2006-07-03 15:24:45 +08:00
|
|
|
|
|
|
|
/*
 * For lockdep irq-state tracing: if the saved EFLAGS.IF (bit 9) shows the
 * upcoming iret will re-enable interrupts, report TRACE_IRQS_ON first.
 * Compiles to nothing without CONFIG_TRACE_IRQFLAGS.
 */
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp:at FRAMEEND */
|
2008-11-21 23:41:55 +08:00
|
|
|
/*
 * SYSCALL does not push a hardware iret frame; fabricate the top of the
 * pt_regs (RSP from the per-cpu old_rsp, user SS/CS, EFLAGS from R11
 * where SYSCALL left it) so C code sees a complete frame.  RCX is set
 * to -1 since SYSCALL clobbered the user value with the return RIP.
 */
.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq PER_CPU_VAR(old_rsp),\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
.endm
|
|
|
|
|
2008-11-21 23:41:55 +08:00
|
|
|
/*
 * Inverse of FIXUP_TOP_OF_STACK: copy any ptregs changes back into the
 * sysret state (per-cpu old_rsp, and EFLAGS into the R11 slot that
 * sysret restores eflags from).
 */
.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,PER_CPU_VAR(old_rsp)
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
.endm
|
|
|
|
|
|
|
|
/*
 * Build a fake interrupt frame (SS, RSP=0, EFLAGS with IF set, CS, RIP =
 * \child_rip, orig_rax = 0) on the current stack, with matching CFI
 * annotations for the unwinder.  Undone by UNFAKE_STACK_FRAME.
 */
.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $X86_EFLAGS_IF /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq %rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
.endm
|
|
|
|
|
|
|
|
/* Pop the six quadwords pushed by FAKE_STACK_FRAME. */
.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
.endm
|
|
|
|
|
2008-11-20 21:40:11 +08:00
|
|
|
/*
|
|
|
|
* initial frame state for interrupts (and exceptions without error code)
|
|
|
|
*/
|
|
|
|
/*
 * Base CFI state: CFA is rsp plus the return address (8) plus \offset.
 * \start selects whether this opens a new FDE (CFI_STARTPROC) or just
 * adjusts the offset inside an existing one.
 */
.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
.endm
|
/*
 * initial frame state for interrupts (and exceptions without error code)
 */
|
2008-11-20 21:40:11 +08:00
|
|
|
/*
 * CFI state for a hardware interrupt frame (SS..RIP on the stack).
 * Only rsp and rip are annotated; the commented-out entries are kept as
 * documentation of the full frame layout.
 */
.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
.endm
|
|
|
|
|
|
|
|
/*
|
|
|
|
* initial frame state for exceptions with error code (and interrupts
|
|
|
|
* with vector already pushed)
|
|
|
|
*/
|
2008-11-20 21:40:11 +08:00
|
|
|
/*
 * CFI state for exceptions with an error code (and interrupts with the
 * vector already pushed): an INTR_FRAME with one extra word on top.
 */
.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
.endm
|
|
|
|
|
|
|
|
/*
|
|
|
|
* frame that enables calling into C.
|
|
|
|
*/
|
|
|
|
/*
 * CFI state for a partial pt_regs (argument/scratch registers saved,
 * as done by SAVE_ARGS) — enough to call into C.
 */
.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
.endm
|
|
|
|
|
|
|
|
/*
|
|
|
|
* frame that enables passing a complete pt_regs to a C function.
|
|
|
|
*/
|
|
|
|
/*
 * CFI state for a full pt_regs (partial frame plus the callee-saved
 * registers) — enables passing a complete pt_regs to a C function.
 */
.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
.endm
|
/* save partial stack frame */
|
|
|
|
ENTRY(save_args)
|
|
|
|
XCPT_FRAME
|
|
|
|
cld
|
2008-11-21 22:20:47 +08:00
|
|
|
movq_cfi rdi, RDI+16-ARGOFFSET
|
|
|
|
movq_cfi rsi, RSI+16-ARGOFFSET
|
|
|
|
movq_cfi rdx, RDX+16-ARGOFFSET
|
|
|
|
movq_cfi rcx, RCX+16-ARGOFFSET
|
|
|
|
movq_cfi rax, RAX+16-ARGOFFSET
|
|
|
|
movq_cfi r8, R8+16-ARGOFFSET
|
|
|
|
movq_cfi r9, R9+16-ARGOFFSET
|
|
|
|
movq_cfi r10, R10+16-ARGOFFSET
|
|
|
|
movq_cfi r11, R11+16-ARGOFFSET
|
|
|
|
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of to the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now look like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
|
2008-11-21 22:20:47 +08:00
|
|
|
movq_cfi rbp, 8 /* push %rbp */
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
|
|
|
|
testl $3, CS(%rdi)
|
|
|
|
je 1f
|
|
|
|
SWAPGS
|
|
|
|
/*
|
2009-01-18 23:38:58 +08:00
|
|
|
* irq_count is used to check if a CPU is already on an interrupt stack
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
* or not. While this is essentially redundant with preempt_count it is
|
|
|
|
* a little cheaper to use a separate counter in the PDA (short of
|
|
|
|
* moving irq_enter into assembly, which would be too much work)
|
|
|
|
*/
|
2009-01-18 23:38:58 +08:00
|
|
|
1: incl PER_CPU_VAR(irq_count)
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
jne 2f
|
2008-11-21 22:20:47 +08:00
|
|
|
popq_cfi %rax /* move return address... */
|
2009-01-18 23:38:58 +08:00
|
|
|
mov PER_CPU_VAR(irq_stack_ptr),%rsp
|
2008-11-20 21:40:11 +08:00
|
|
|
EMPTY_FRAME 0
|
2009-01-31 00:50:54 +08:00
|
|
|
pushq_cfi %rbp /* backlink for unwinder */
|
2008-11-21 22:20:47 +08:00
|
|
|
pushq_cfi %rax /* ... to the new stack */
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
/*
|
|
|
|
* We entered an interrupt context - irqs are off:
|
|
|
|
*/
|
|
|
|
2: TRACE_IRQS_OFF
|
|
|
|
ret
|
|
|
|
CFI_ENDPROC
|
|
|
|
END(save_args)
|
|
|
|
|
2008-11-21 23:41:55 +08:00
|
|
|
ENTRY(save_rest)
|
|
|
|
PARTIAL_FRAME 1 REST_SKIP+8
|
|
|
|
movq 5*8+16(%rsp), %r11 /* save return address */
|
|
|
|
movq_cfi rbx, RBX+16
|
|
|
|
movq_cfi rbp, RBP+16
|
|
|
|
movq_cfi r12, R12+16
|
|
|
|
movq_cfi r13, R13+16
|
|
|
|
movq_cfi r14, R14+16
|
|
|
|
movq_cfi r15, R15+16
|
|
|
|
movq %r11, 8(%rsp) /* return address */
|
|
|
|
FIXUP_TOP_OF_STACK %r11, 16
|
|
|
|
ret
|
|
|
|
CFI_ENDPROC
|
|
|
|
END(save_rest)
|
|
|
|
|
2008-11-21 23:43:18 +08:00
|
|
|
/* save complete stack frame */
|
2009-03-12 18:38:55 +08:00
|
|
|
.pushsection .kprobes.text, "ax"
|
2008-11-21 23:43:18 +08:00
|
|
|
ENTRY(save_paranoid)
|
|
|
|
XCPT_FRAME 1 RDI+8
|
|
|
|
cld
|
|
|
|
movq_cfi rdi, RDI+8
|
|
|
|
movq_cfi rsi, RSI+8
|
|
|
|
movq_cfi rdx, RDX+8
|
|
|
|
movq_cfi rcx, RCX+8
|
|
|
|
movq_cfi rax, RAX+8
|
|
|
|
movq_cfi r8, R8+8
|
|
|
|
movq_cfi r9, R9+8
|
|
|
|
movq_cfi r10, R10+8
|
|
|
|
movq_cfi r11, R11+8
|
|
|
|
movq_cfi rbx, RBX+8
|
|
|
|
movq_cfi rbp, RBP+8
|
|
|
|
movq_cfi r12, R12+8
|
|
|
|
movq_cfi r13, R13+8
|
|
|
|
movq_cfi r14, R14+8
|
|
|
|
movq_cfi r15, R15+8
|
|
|
|
movl $1,%ebx
|
|
|
|
movl $MSR_GS_BASE,%ecx
|
|
|
|
rdmsr
|
|
|
|
testl %edx,%edx
|
|
|
|
js 1f /* negative -> in kernel */
|
|
|
|
SWAPGS
|
|
|
|
xorl %ebx,%ebx
|
|
|
|
1: ret
|
|
|
|
CFI_ENDPROC
|
|
|
|
END(save_paranoid)
|
2009-03-12 18:38:55 +08:00
|
|
|
.popsection
|
2008-11-21 23:43:18 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2008-11-27 21:41:21 +08:00
|
|
|
* A newly forked process directly context switches into this address.
|
|
|
|
*
|
|
|
|
* rdi: prev task we switched from
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
ENTRY(ret_from_fork)
|
2008-11-20 21:40:11 +08:00
|
|
|
DEFAULT_FRAME
|
2008-11-27 21:41:21 +08:00
|
|
|
|
2009-01-11 12:00:22 +08:00
|
|
|
LOCK ; btr $TIF_FORK,TI_flags(%r8)
|
|
|
|
|
2006-09-26 16:52:41 +08:00
|
|
|
push kernel_eflags(%rip)
|
2008-07-23 00:14:16 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2008-11-27 21:41:21 +08:00
|
|
|
popf # reset kernel eflags
|
2008-07-23 00:14:16 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2008-11-27 21:41:21 +08:00
|
|
|
|
|
|
|
call schedule_tail # rdi: 'prev' task parameter
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
2008-11-27 21:41:21 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
RESTORE_REST
|
2008-11-27 21:41:21 +08:00
|
|
|
|
|
|
|
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
|
2005-04-17 06:20:36 +08:00
|
|
|
je int_ret_from_sys_call
|
2008-11-27 21:41:21 +08:00
|
|
|
|
|
|
|
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
|
2005-04-17 06:20:36 +08:00
|
|
|
jnz int_ret_from_sys_call
|
2008-11-27 21:41:21 +08:00
|
|
|
|
2008-11-21 23:41:55 +08:00
|
|
|
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
|
2008-11-27 21:41:21 +08:00
|
|
|
jmp ret_from_sys_call # go to the SYSRET fastpath
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(ret_from_fork)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* System call entry. Upto 6 arguments in registers are supported.
|
|
|
|
*
|
|
|
|
* SYSCALL does not save anything on the stack and does not change the
|
|
|
|
* stack pointer.
|
|
|
|
*/
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2008-11-16 22:29:00 +08:00
|
|
|
* Register setup:
|
2005-04-17 06:20:36 +08:00
|
|
|
* rax system call number
|
|
|
|
* rdi arg0
|
2008-11-16 22:29:00 +08:00
|
|
|
* rcx return address for syscall/sysret, C arg3
|
2005-04-17 06:20:36 +08:00
|
|
|
* rsi arg1
|
2008-11-16 22:29:00 +08:00
|
|
|
* rdx arg2
|
2005-04-17 06:20:36 +08:00
|
|
|
* r10 arg3 (--> moved to rcx for C)
|
|
|
|
* r8 arg4
|
|
|
|
* r9 arg5
|
|
|
|
* r11 eflags for syscall/sysret, temporary for C
|
2008-11-16 22:29:00 +08:00
|
|
|
* r12-r15,rbp,rbx saved by C code, not touched.
|
|
|
|
*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Interrupts are off on entry.
|
|
|
|
* Only called from user space.
|
|
|
|
*
|
|
|
|
* XXX if we had a free scratch register we could save the RSP into the stack frame
|
|
|
|
* and report it properly in ps. Unfortunately we haven't.
|
2006-04-08 01:50:00 +08:00
|
|
|
*
|
|
|
|
* When user can change the frames always force IRET. That is because
|
|
|
|
* it deals with uncanonical addresses better. SYSRET has trouble
|
|
|
|
* with them due to bugs in both AMD and Intel CPUs.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ENTRY(system_call)
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_STARTPROC simple
|
2006-09-26 16:52:41 +08:00
|
|
|
CFI_SIGNAL_FRAME
|
2009-01-18 23:38:58 +08:00
|
|
|
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_REGISTER rip,rcx
|
|
|
|
/*CFI_REGISTER rflags,r11*/
|
2008-01-30 20:32:08 +08:00
|
|
|
SWAPGS_UNSAFE_STACK
|
|
|
|
/*
|
|
|
|
* A hypervisor implementation might want to use a label
|
|
|
|
* after the swapgs, so that it can do the swapgs
|
|
|
|
* for the guest and jump here on syscall.
|
|
|
|
*/
|
|
|
|
ENTRY(system_call_after_swapgs)
|
|
|
|
|
2009-01-18 23:38:58 +08:00
|
|
|
movq %rsp,PER_CPU_VAR(old_rsp)
|
2009-01-18 23:38:58 +08:00
|
|
|
movq PER_CPU_VAR(kernel_stack),%rsp
|
2006-07-03 15:24:45 +08:00
|
|
|
/*
|
|
|
|
* No need to follow this irqs off/on section - it's straight
|
|
|
|
* and short:
|
|
|
|
*/
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_ARGS 8,1
|
2008-11-16 22:29:00 +08:00
|
|
|
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
|
2005-09-13 00:49:24 +08:00
|
|
|
movq %rcx,RIP-ARGOFFSET(%rsp)
|
|
|
|
CFI_REL_OFFSET rip,RIP-ARGOFFSET
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
2008-07-09 17:38:07 +08:00
|
|
|
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
|
2005-04-17 06:20:36 +08:00
|
|
|
jnz tracesys
|
2008-06-24 06:37:04 +08:00
|
|
|
system_call_fastpath:
|
2005-04-17 06:20:36 +08:00
|
|
|
cmpq $__NR_syscall_max,%rax
|
|
|
|
ja badsys
|
|
|
|
movq %r10,%rcx
|
|
|
|
call *sys_call_table(,%rax,8) # XXX: rip relative
|
|
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
|
|
|
/*
|
|
|
|
* Syscall return path ending with SYSRET (fast path)
|
2008-11-16 22:29:00 +08:00
|
|
|
* Has incomplete stack frame and undefined top of stack.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
ret_from_sys_call:
|
2005-04-17 06:25:02 +08:00
|
|
|
movl $_TIF_ALLWORK_MASK,%edi
|
2005-04-17 06:20:36 +08:00
|
|
|
/* edi: flagmask */
|
2008-11-16 22:29:00 +08:00
|
|
|
sysret_check:
|
2007-10-12 04:11:12 +08:00
|
|
|
LOCKDEP_SYS_EXIT
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2008-06-24 22:19:35 +08:00
|
|
|
movl TI_flags(%rcx),%edx
|
2005-04-17 06:20:36 +08:00
|
|
|
andl %edi,%edx
|
2008-11-16 22:29:00 +08:00
|
|
|
jnz sysret_careful
|
2006-12-07 09:14:02 +08:00
|
|
|
CFI_REMEMBER_STATE
|
2006-07-03 15:24:45 +08:00
|
|
|
/*
|
|
|
|
* sysretq will re-enable interrupts:
|
|
|
|
*/
|
|
|
|
TRACE_IRQS_ON
|
2005-04-17 06:20:36 +08:00
|
|
|
movq RIP-ARGOFFSET(%rsp),%rcx
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_REGISTER rip,rcx
|
2005-04-17 06:20:36 +08:00
|
|
|
RESTORE_ARGS 0,-ARG_SKIP,1
|
2005-09-13 00:49:24 +08:00
|
|
|
/*CFI_REGISTER rflags,r11*/
|
2009-01-18 23:38:58 +08:00
|
|
|
movq PER_CPU_VAR(old_rsp), %rsp
|
2008-06-25 12:19:28 +08:00
|
|
|
USERGS_SYSRET64
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-12-07 09:14:02 +08:00
|
|
|
CFI_RESTORE_STATE
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Handle reschedules */
|
2008-11-16 22:29:00 +08:00
|
|
|
/* edx: work, edi: workmask */
|
2005-04-17 06:20:36 +08:00
|
|
|
sysret_careful:
|
|
|
|
bt $TIF_NEED_RESCHED,%edx
|
|
|
|
jnc sysret_signal
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2005-04-17 06:20:36 +08:00
|
|
|
pushq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2005-04-17 06:20:36 +08:00
|
|
|
call schedule
|
|
|
|
popq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp sysret_check
|
|
|
|
|
2008-11-16 22:29:00 +08:00
|
|
|
/* Handle a signal */
|
2005-04-17 06:20:36 +08:00
|
|
|
sysret_signal:
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2008-06-24 06:37:04 +08:00
|
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
|
|
bt $TIF_SYSCALL_AUDIT,%edx
|
|
|
|
jc sysret_audit
|
|
|
|
#endif
|
2009-09-23 07:46:34 +08:00
|
|
|
/*
|
|
|
|
* We have a signal, or exit tracing or single-step.
|
|
|
|
* These all wind up with the iret return path anyway,
|
|
|
|
* so just join that path right now.
|
|
|
|
*/
|
|
|
|
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
|
|
|
|
jmp int_check_syscall_exit_work
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-09-13 00:49:24 +08:00
|
|
|
badsys:
|
|
|
|
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
|
|
|
|
jmp ret_from_sys_call
|
|
|
|
|
2008-06-24 06:37:04 +08:00
|
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
|
|
/*
|
|
|
|
* Fast path for syscall audit without full syscall trace.
|
|
|
|
* We just call audit_syscall_entry() directly, and then
|
|
|
|
* jump back to the normal fast path.
|
|
|
|
*/
|
|
|
|
auditsys:
|
|
|
|
movq %r10,%r9 /* 6th arg: 4th syscall arg */
|
|
|
|
movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
|
|
|
|
movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
|
|
|
|
movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
|
|
|
|
movq %rax,%rsi /* 2nd arg: syscall number */
|
|
|
|
movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
|
|
|
|
call audit_syscall_entry
|
|
|
|
LOAD_ARGS 0 /* reload call-clobbered registers */
|
|
|
|
jmp system_call_fastpath
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return fast path for syscall audit. Call audit_syscall_exit()
|
|
|
|
* directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
|
|
|
|
* masked off.
|
|
|
|
*/
|
|
|
|
sysret_audit:
|
|
|
|
movq %rax,%rsi /* second arg, syscall return value */
|
|
|
|
cmpq $0,%rax /* is it < 0? */
|
|
|
|
setl %al /* 1 if so, 0 if not */
|
|
|
|
movzbl %al,%edi /* zero-extend that into %edi */
|
|
|
|
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
|
|
|
|
call audit_syscall_exit
|
|
|
|
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
|
|
|
|
jmp sysret_check
|
|
|
|
#endif /* CONFIG_AUDITSYSCALL */
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Do syscall tracing */
|
2008-11-16 22:29:00 +08:00
|
|
|
tracesys:
|
2008-06-24 06:37:04 +08:00
|
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
|
|
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
|
|
|
|
jz auditsys
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_REST
|
2008-03-17 12:59:11 +08:00
|
|
|
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
|
2005-04-17 06:20:36 +08:00
|
|
|
FIXUP_TOP_OF_STACK %rdi
|
|
|
|
movq %rsp,%rdi
|
|
|
|
call syscall_trace_enter
|
2008-07-09 17:38:07 +08:00
|
|
|
/*
|
|
|
|
* Reload arg registers from stack in case ptrace changed them.
|
|
|
|
* We don't reload %rax because syscall_trace_enter() returned
|
|
|
|
* the value it wants us to use in the table lookup.
|
|
|
|
*/
|
|
|
|
LOAD_ARGS ARGOFFSET, 1
|
2005-04-17 06:20:36 +08:00
|
|
|
RESTORE_REST
|
|
|
|
cmpq $__NR_syscall_max,%rax
|
2008-03-17 12:59:11 +08:00
|
|
|
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
|
2005-04-17 06:20:36 +08:00
|
|
|
movq %r10,%rcx /* fixup for C */
|
|
|
|
call *sys_call_table(,%rax,8)
|
2008-03-17 12:59:11 +08:00
|
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
2006-04-08 01:50:00 +08:00
|
|
|
/* Use IRET because user could have changed frame */
|
2008-11-16 22:29:00 +08:00
|
|
|
|
|
|
|
/*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Syscall return path ending with IRET.
|
|
|
|
* Has correct top of stack, but partial stack frame.
|
2006-12-07 09:14:02 +08:00
|
|
|
*/
|
2009-02-24 03:57:01 +08:00
|
|
|
GLOBAL(int_ret_from_sys_call)
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
testl $3,CS-ARGOFFSET(%rsp)
|
|
|
|
je retint_restore_args
|
|
|
|
movl $_TIF_ALLWORK_MASK,%edi
|
|
|
|
/* edi: mask to check */
|
2009-02-24 03:57:01 +08:00
|
|
|
GLOBAL(int_with_check)
|
2007-10-12 04:11:12 +08:00
|
|
|
LOCKDEP_SYS_EXIT_IRQ
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
2008-06-24 22:19:35 +08:00
|
|
|
movl TI_flags(%rcx),%edx
|
2005-04-17 06:20:36 +08:00
|
|
|
andl %edi,%edx
|
|
|
|
jnz int_careful
|
2008-06-24 22:19:35 +08:00
|
|
|
andl $~TS_COMPAT,TI_status(%rcx)
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp retint_swapgs
|
|
|
|
|
|
|
|
/* Either reschedule or signal or syscall exit tracking needed. */
|
|
|
|
/* First do a reschedule test. */
|
|
|
|
/* edx: work, edi: workmask */
|
|
|
|
int_careful:
|
|
|
|
bt $TIF_NEED_RESCHED,%edx
|
|
|
|
jnc int_very_careful
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2005-04-17 06:20:36 +08:00
|
|
|
pushq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2005-04-17 06:20:36 +08:00
|
|
|
call schedule
|
|
|
|
popq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp int_with_check
|
|
|
|
|
|
|
|
/* handle signals and tracing -- both require a full stack frame */
|
|
|
|
int_very_careful:
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2009-09-23 07:46:34 +08:00
|
|
|
int_check_syscall_exit_work:
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_REST
|
2008-11-16 22:29:00 +08:00
|
|
|
/* Check for syscall exit trace */
|
2008-07-09 17:38:07 +08:00
|
|
|
testl $_TIF_WORK_SYSCALL_EXIT,%edx
|
2005-04-17 06:20:36 +08:00
|
|
|
jz int_signal
|
|
|
|
pushq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2008-11-16 22:29:00 +08:00
|
|
|
leaq 8(%rsp),%rdi # &ptregs -> arg1
|
2005-04-17 06:20:36 +08:00
|
|
|
call syscall_trace_leave
|
|
|
|
popq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2008-07-09 17:38:07 +08:00
|
|
|
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp int_restore_rest
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
int_signal:
|
2008-01-26 04:08:29 +08:00
|
|
|
testl $_TIF_DO_NOTIFY_MASK,%edx
|
2005-04-17 06:20:36 +08:00
|
|
|
jz 1f
|
|
|
|
movq %rsp,%rdi # &ptregs -> arg1
|
|
|
|
xorl %esi,%esi # oldset -> arg2
|
|
|
|
call do_notify_resume
|
x86_64: fix delayed signals
On three of the several paths in entry_64.S that call
do_notify_resume() on the way back to user mode, we fail to properly
check again for newly-arrived work that requires another call to
do_notify_resume() before going to user mode. These paths set the
mask to check only _TIF_NEED_RESCHED, but this is wrong. The other
paths that lead to do_notify_resume() do this correctly already, and
entry_32.S does it correctly in all cases.
All paths back to user mode have to check all the _TIF_WORK_MASK
flags at the last possible stage, with interrupts disabled.
Otherwise, we miss any flags (TIF_SIGPENDING for example) that were
set any time after we entered do_notify_resume(). More work flags
can be set (or left set) synchronously inside do_notify_resume(), as
TIF_SIGPENDING can be, or asynchronously by interrupts or other CPUs
(which then send an asynchronous interrupt).
There are many different scenarios that could hit this bug, most of
them races. The simplest one to demonstrate does not require any
race: when one signal has done handler setup at the check before
returning from a syscall, and there is another signal pending that
should be handled. The second signal's handler should interrupt the
first signal handler before it actually starts (so the interrupted PC
is still at the handler's entry point). Instead, it runs away until
the next kernel entry (next syscall, tick, etc).
This test behaves correctly on 32-bit kernels, and fails on 64-bit
(either 32-bit or 64-bit test binary). With this fix, it works.
#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <sys/ucontext.h>
#ifndef REG_RIP
#define REG_RIP REG_EIP
#endif
static sig_atomic_t hit1, hit2;
static void
handler (int sig, siginfo_t *info, void *ctx)
{
ucontext_t *uc = ctx;
if ((void *) uc->uc_mcontext.gregs[REG_RIP] == &handler)
{
if (sig == SIGUSR1)
hit1 = 1;
else
hit2 = 1;
}
printf ("%s at %#lx\n", strsignal (sig),
uc->uc_mcontext.gregs[REG_RIP]);
}
int
main (void)
{
struct sigaction sa;
sigset_t set;
sigemptyset (&sa.sa_mask);
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = &handler;
if (sigaction (SIGUSR1, &sa, NULL)
|| sigaction (SIGUSR2, &sa, NULL))
return 2;
sigemptyset (&set);
sigaddset (&set, SIGUSR1);
sigaddset (&set, SIGUSR2);
if (sigprocmask (SIG_BLOCK, &set, NULL))
return 3;
printf ("main at %p, handler at %p\n", &main, &handler);
raise (SIGUSR1);
raise (SIGUSR2);
if (sigprocmask (SIG_UNBLOCK, &set, NULL))
return 4;
if (hit1 + hit2 == 1)
{
puts ("PASS");
return 0;
}
puts ("FAIL");
return 1;
}
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-07-11 05:50:39 +08:00
|
|
|
1: movl $_TIF_WORK_MASK,%edi
|
2005-04-17 06:20:36 +08:00
|
|
|
int_restore_rest:
|
|
|
|
RESTORE_REST
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp int_with_check
|
|
|
|
CFI_ENDPROC
|
2006-12-07 09:14:02 +08:00
|
|
|
END(system_call)
|
2008-11-16 22:29:00 +08:00
|
|
|
|
|
|
|
/*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Certain special system calls that need to save a complete full stack frame.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
.macro PTREGSCALL label,func,arg
/*
 * Emit a syscall stub \label for calls that need a full pt_regs frame:
 * reserve REST_SKIP bytes, let save_rest fill in the registers that
 * SAVE_ARGS skipped, pass a pt_regs pointer to \func in register \arg,
 * and finish through ptregscall_common (which undoes all of this).
 */
|
2008-11-21 23:41:55 +08:00
|
|
|
ENTRY(\label)
|
|
|
|
PARTIAL_FRAME 1 8 /* offset 8: return address */
|
|
|
|
subq $REST_SKIP, %rsp	/* room for the extended-register slots */
|
|
|
|
CFI_ADJUST_CFA_OFFSET REST_SKIP
|
|
|
|
call save_rest	/* fill the slots ptregscall_common will restore */
|
|
|
|
DEFAULT_FRAME 0 8 /* offset 8: return address */
|
|
|
|
leaq 8(%rsp), \arg /* pt_regs pointer */
|
|
|
|
call \func
|
|
|
|
jmp ptregscall_common
|
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(\label)
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
|
|
|
/*
 * Stubs for the syscalls that need a full pt_regs frame.  The third
 * macro argument names the register that receives the pt_regs pointer,
 * i.e. the argument slot of the corresponding sys_* function.
 */
PTREGSCALL stub_clone, sys_clone, %r8
|
|
|
|
PTREGSCALL stub_fork, sys_fork, %rdi
|
|
|
|
PTREGSCALL stub_vfork, sys_vfork, %rdi
|
|
|
|
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
|
|
|
|
PTREGSCALL stub_iopl, sys_iopl, %rsi
|
|
|
|
|
|
|
|
ENTRY(ptregscall_common)
/*
 * Common tail for the PTREGSCALL stubs: restore the hardware stack
 * frame (RESTORE_TOP_OF_STACK), reload the callee-saved registers
 * from the pt_regs frame, and return while popping the REST_SKIP
 * bytes the stub reserved.
 */
|
2008-11-21 23:41:55 +08:00
|
|
|
DEFAULT_FRAME 1 8 /* offset 8: return address */
|
|
|
|
RESTORE_TOP_OF_STACK %r11, 8
|
|
|
|
movq_cfi_restore R15+8, r15
|
|
|
|
movq_cfi_restore R14+8, r14
|
|
|
|
movq_cfi_restore R13+8, r13
|
|
|
|
movq_cfi_restore R12+8, r12
|
|
|
|
movq_cfi_restore RBP+8, rbp
|
|
|
|
movq_cfi_restore RBX+8, rbx
|
|
|
|
ret $REST_SKIP /* pop extended registers */
|
2005-04-17 06:20:36 +08:00
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(ptregscall_common)
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
ENTRY(stub_execve)
/*
 * execve needs a complete full stack frame (see the comment above the
 * PTREGSCALL macro).  Build it, pass sys_execve a pt_regs pointer in
 * %rcx, and return through the IRET path (int_ret_from_sys_call)
 * rather than SYSRET.
 */
|
|
|
|
CFI_STARTPROC
|
|
|
|
popq %r11	/* return address -> %r11 (rip, per CFI below) */
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
|
|
CFI_REGISTER rip, r11
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_REST
|
|
|
|
FIXUP_TOP_OF_STACK %r11
|
2008-02-26 19:55:57 +08:00
|
|
|
movq %rsp, %rcx	/* arg: pt_regs pointer */
|
2005-04-17 06:20:36 +08:00
|
|
|
call sys_execve
|
|
|
|
RESTORE_TOP_OF_STACK %r11
|
|
|
|
movq %rax,RAX(%rsp)	/* store the return value into pt_regs */
|
|
|
|
RESTORE_REST
|
|
|
|
jmp int_ret_from_sys_call
|
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(stub_execve)
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* sigreturn is special because it needs to restore all registers on return.
|
|
|
|
* This cannot be done with SYSRET, so use the IRET return path instead.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
ENTRY(stub_rt_sigreturn)
/*
 * sigreturn restores all registers from the signal frame, which SYSRET
 * cannot do (see the comment block above), so take the IRET return
 * path.  sys_rt_sigreturn gets the pt_regs pointer in %rdi.
 */
|
|
|
|
CFI_STARTPROC
|
2005-09-13 00:49:24 +08:00
|
|
|
addq $8, %rsp	/* discard the return address */
|
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_REST
|
|
|
|
movq %rsp,%rdi	/* arg: pt_regs pointer */
|
|
|
|
FIXUP_TOP_OF_STACK %r11
|
|
|
|
call sys_rt_sigreturn
|
|
|
|
movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
|
|
|
|
RESTORE_REST
|
|
|
|
jmp int_ret_from_sys_call
|
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(stub_rt_sigreturn)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-11-12 05:51:52 +08:00
|
|
|
/*
|
|
|
|
* Build the entry stubs and pointer table with some assembler magic.
|
|
|
|
* We pack 7 stubs into a single 32-byte chunk, which will fit in a
|
|
|
|
* single cache line on all modern x86 implementations.
|
|
|
|
*/
|
|
|
|
.section .init.rodata,"a"
/*
 * 'interrupt' is a table (in .init.rodata) of pointers to the entry
 * stubs generated below; the stubs themselves go into .text.  As the
 * comment above says, seven stubs are packed per 32-byte chunk and
 * share one 'jmp common_interrupt' tail.
 */
|
|
|
|
ENTRY(interrupt)
|
|
|
|
.text
|
|
|
|
.p2align 5
|
|
|
|
.p2align CONFIG_X86_L1_CACHE_SHIFT
|
|
|
|
ENTRY(irq_entries_start)
|
|
|
|
INTR_FRAME
|
|
|
|
vector=FIRST_EXTERNAL_VECTOR
|
|
|
|
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
|
|
|
|
.balign 32
|
|
|
|
.rept 7
|
|
|
|
.if vector < NR_VECTORS
|
2008-11-13 02:27:35 +08:00
|
|
|
.if vector <> FIRST_EXTERNAL_VECTOR
|
2008-11-12 05:51:52 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
|
|
.endif
|
|
|
|
1: pushq $(~vector+0x80) /* Note: always in signed byte range */
|
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2008-11-13 02:27:35 +08:00
|
|
|
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
|
2008-11-12 05:51:52 +08:00
|
|
|
jmp 2f	/* not the chunk's last stub: jump to the shared tail */
|
|
|
|
.endif
|
|
|
|
.previous	/* back to .init.rodata: record this stub's address */
|
|
|
|
.quad 1b
|
|
|
|
.text
|
|
|
|
vector=vector+1
|
|
|
|
.endif
|
|
|
|
.endr
|
|
|
|
2: jmp common_interrupt
|
|
|
|
.endr
|
|
|
|
CFI_ENDPROC
|
|
|
|
END(irq_entries_start)
|
|
|
|
|
|
|
|
.previous
|
|
|
|
END(interrupt)
|
|
|
|
.previous
|
|
|
|
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of to the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
/*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Interrupt entry/exit.
|
|
|
|
*
|
|
|
|
* Interrupt entry points save only callee clobbered registers in fast path.
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of to the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
*
|
|
|
|
* Entry runs with interrupts off.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-11-13 20:50:20 +08:00
|
|
|
/* 0(%rsp): ~(interrupt number) */
|
2005-04-17 06:20:36 +08:00
|
|
|
.macro interrupt func
|
x86: move entry_64.S register saving out of the macros
Here is a combined patch that moves "save_args" out-of-line for
the interrupt macro and moves "error_entry" mostly out-of-line
for the zeroentry and errorentry macros.
The save_args function becomes really straightforward and easy
to understand, with the possible exception of the stack switch
code, which now needs to copy the return address of to the
calling function. Normal interrupts arrive with ((~vector)-0x80)
on the stack, which gets adjusted in common_interrupt:
<common_interrupt>:
(5) addq $0xffffffffffffff80,(%rsp) /* -> ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80214290 <do_IRQ>
<ret_from_intr>:
...
An apic interrupt stub now looks like this:
<thermal_interrupt>:
(5) pushq $0xffffffffffffff05 /* ~(vector) */
(4) sub $0x50,%rsp /* space for registers */
(5) callq ffffffff80211290 <save_args>
(5) callq ffffffff80212b8f <smp_thermal_interrupt>
(5) jmpq ffffffff80211f93 <ret_from_intr>
Similarly the exception handler register saving function becomes
simpler, without the need of any parameter shuffling. The stub
for an exception without errorcode looks like this:
<overflow>:
(6) callq *0x1cad12(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(2) pushq $0xffffffffffffffff /* no syscall */
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(2) xor %esi,%esi /* no error code */
(5) callq ffffffff80213446 <do_overflow>
(5) jmpq ffffffff8030e460 <error_exit>
And one for an exception with errorcode like this:
<segment_not_present>:
(6) callq *0x1cab92(%rip) # ffffffff803dd448 <pv_irq_ops+0x38>
(4) sub $0x78,%rsp /* space for registers */
(5) callq ffffffff8030e3b0 <error_entry>
(3) mov %rsp,%rdi /* pt_regs pointer */
(5) mov 0x78(%rsp),%rsi /* load error code */
(9) movq $0xffffffffffffffff,0x78(%rsp) /* no syscall */
(5) callq ffffffff80213209 <do_segment_not_present>
(5) jmpq ffffffff8030e460 <error_exit>
Unfortunately, this last type is more than 32 bytes. But the total space
savings due to this patch is about 2500 bytes on an smp-configuration,
and I think the code is clearer than it was before. The tested kernels
were non-paravirt ones (i.e., without the indirect call at the top of
the exception handlers).
Anyhow, I tested this patch on top of a recent -tip. The machine
was a 2x4-core Xeon at 2333MHz. Measured were the delays between
(almost-)adjacent rdtsc instructions. The graphs show how much
time is spent outside of the program as a function of the measured
delay. The area under the graph represents the total time spent
outside the program. Eight instances of the rdtsctest were
started, each pinned to a single cpu. The histograms are added.
For each kernel two measurements were done: one in mostly idle
condition, the other while running "bonnie++ -f", bound to cpu 0.
Each measurement took 40 minutes runtime. See the attached graphs
for the results. The graphs overlap almost everywhere, but there
are small differences.
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-19 08:18:11 +08:00
|
|
|
subq $10*8, %rsp
|
|
|
|
CFI_ADJUST_CFA_OFFSET 10*8
|
|
|
|
call save_args
|
2008-11-20 21:40:11 +08:00
|
|
|
PARTIAL_FRAME 0
|
2005-04-17 06:20:36 +08:00
|
|
|
call \func
|
|
|
|
.endm
|
|
|
|
|
2008-11-13 20:50:20 +08:00
|
|
|
/*
|
|
|
|
* The interrupt stubs push (~vector+0x80) onto the stack and
|
|
|
|
* then jump to common_interrupt.
|
|
|
|
*/
|
2008-11-12 05:51:52 +08:00
|
|
|
.p2align CONFIG_X86_L1_CACHE_SHIFT
|
|
|
|
common_interrupt:
|
2005-09-13 00:49:24 +08:00
|
|
|
XCPT_FRAME
|
2008-11-13 20:50:20 +08:00
|
|
|
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
|
2005-04-17 06:20:36 +08:00
|
|
|
interrupt do_IRQ
|
2009-01-18 23:38:58 +08:00
|
|
|
/* 0(%rsp): old_rsp-ARGOFFSET */
|
2005-09-13 00:49:24 +08:00
|
|
|
ret_from_intr:
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2009-01-18 23:38:58 +08:00
|
|
|
decl PER_CPU_VAR(irq_count)
|
2006-06-26 19:57:35 +08:00
|
|
|
leaveq
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_DEF_CFA_REGISTER rsp
|
2006-06-26 19:57:35 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2005-09-13 00:49:24 +08:00
|
|
|
exit_intr:
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
|
|
|
testl $3,CS-ARGOFFSET(%rsp)
|
|
|
|
je retint_kernel
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Interrupt came from user space */
|
|
|
|
/*
|
|
|
|
* Has a correct top of stack, but a partial stack frame
|
|
|
|
* %rcx: thread info. Interrupts off.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
retint_with_reschedule:
|
|
|
|
movl $_TIF_WORK_MASK,%edi
|
2005-09-13 00:49:24 +08:00
|
|
|
retint_check:
|
2007-10-12 04:11:12 +08:00
|
|
|
LOCKDEP_SYS_EXIT_IRQ
|
2008-06-24 22:19:35 +08:00
|
|
|
movl TI_flags(%rcx),%edx
|
2005-04-17 06:20:36 +08:00
|
|
|
andl %edi,%edx
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_REMEMBER_STATE
|
2005-04-17 06:20:36 +08:00
|
|
|
jnz retint_careful
|
2007-10-12 04:11:12 +08:00
|
|
|
|
|
|
|
retint_swapgs: /* return to user-space */
|
2006-07-03 15:24:45 +08:00
|
|
|
/*
|
|
|
|
* The iretq could re-enable interrupts:
|
|
|
|
*/
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_ANY)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_IRETQ
|
2008-01-30 20:32:08 +08:00
|
|
|
SWAPGS
|
2006-07-03 15:24:45 +08:00
|
|
|
jmp restore_args
|
|
|
|
|
2007-10-12 04:11:12 +08:00
|
|
|
retint_restore_args: /* return to kernel space */
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_ANY)
|
2006-07-03 15:24:45 +08:00
|
|
|
/*
|
|
|
|
* The iretq could re-enable interrupts:
|
|
|
|
*/
|
|
|
|
TRACE_IRQS_IRETQ
|
|
|
|
restore_args:
|
2008-02-10 06:24:08 +08:00
|
|
|
RESTORE_ARGS 0,8,0
|
|
|
|
|
2008-02-14 05:29:53 +08:00
|
|
|
irq_return:
|
2008-01-30 20:32:08 +08:00
|
|
|
INTERRUPT_RETURN
|
2008-02-10 06:24:08 +08:00
|
|
|
|
|
|
|
.section __ex_table, "a"
|
|
|
|
.quad irq_return, bad_iret
|
|
|
|
.previous
|
|
|
|
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
2008-01-30 20:32:08 +08:00
|
|
|
ENTRY(native_iret)
|
2005-04-17 06:20:36 +08:00
|
|
|
iretq
|
|
|
|
|
|
|
|
.section __ex_table,"a"
|
2008-01-30 20:32:08 +08:00
|
|
|
.quad native_iret, bad_iret
|
2005-04-17 06:20:36 +08:00
|
|
|
.previous
|
2008-02-10 06:24:08 +08:00
|
|
|
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
.section .fixup,"ax"
|
|
|
|
bad_iret:
|
2008-02-07 05:39:43 +08:00
|
|
|
/*
|
|
|
|
* The iret traps when the %cs or %ss being restored is bogus.
|
|
|
|
* We've lost the original trap vector and error code.
|
|
|
|
* #GPF is the most likely one to get for an invalid selector.
|
|
|
|
* So pretend we completed the iret and took the #GPF in user mode.
|
|
|
|
*
|
|
|
|
* We are now running with the kernel GS after exception recovery.
|
|
|
|
* But error_entry expects us to have user GS to match the user %cs,
|
|
|
|
* so swap back.
|
|
|
|
*/
|
|
|
|
pushq $0
|
|
|
|
|
|
|
|
SWAPGS
|
|
|
|
jmp general_protection
|
|
|
|
|
2008-01-30 20:32:08 +08:00
|
|
|
.previous
|
|
|
|
|
2005-09-13 00:49:24 +08:00
|
|
|
/* edi: workmask, edx: work */
|
2005-04-17 06:20:36 +08:00
|
|
|
retint_careful:
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_RESTORE_STATE
|
2005-04-17 06:20:36 +08:00
|
|
|
bt $TIF_NEED_RESCHED,%edx
|
|
|
|
jnc retint_signal
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2005-04-17 06:20:36 +08:00
|
|
|
pushq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2005-04-17 06:20:36 +08:00
|
|
|
call schedule
|
2008-11-16 22:29:00 +08:00
|
|
|
popq %rdi
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET -8
|
2005-04-17 06:20:36 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp retint_check
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
retint_signal:
|
2008-01-26 04:08:29 +08:00
|
|
|
testl $_TIF_DO_NOTIFY_MASK,%edx
|
2005-05-17 12:53:19 +08:00
|
|
|
jz retint_swapgs
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_ON
|
2008-01-30 20:32:08 +08:00
|
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_REST
|
2008-11-16 22:29:00 +08:00
|
|
|
movq $-1,ORIG_RAX(%rsp)
|
2005-07-29 12:15:48 +08:00
|
|
|
xorl %esi,%esi # oldset
|
2005-04-17 06:20:36 +08:00
|
|
|
movq %rsp,%rdi # &pt_regs
|
|
|
|
call do_notify_resume
|
|
|
|
RESTORE_REST
|
2008-01-30 20:32:08 +08:00
|
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
2006-07-03 15:24:45 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-05-01 23:58:51 +08:00
|
|
|
GET_THREAD_INFO(%rcx)
|
x86_64: fix delayed signals
On three of the several paths in entry_64.S that call
do_notify_resume() on the way back to user mode, we fail to properly
check again for newly-arrived work that requires another call to
do_notify_resume() before going to user mode. These paths set the
mask to check only _TIF_NEED_RESCHED, but this is wrong. The other
paths that lead to do_notify_resume() do this correctly already, and
entry_32.S does it correctly in all cases.
All paths back to user mode have to check all the _TIF_WORK_MASK
flags at the last possible stage, with interrupts disabled.
Otherwise, we miss any flags (TIF_SIGPENDING for example) that were
set any time after we entered do_notify_resume(). More work flags
can be set (or left set) synchronously inside do_notify_resume(), as
TIF_SIGPENDING can be, or asynchronously by interrupts or other CPUs
(which then send an asynchronous interrupt).
There are many different scenarios that could hit this bug, most of
them races. The simplest one to demonstrate does not require any
race: when one signal has done handler setup at the check before
returning from a syscall, and there is another signal pending that
should be handled. The second signal's handler should interrupt the
first signal handler before it actually starts (so the interrupted PC
is still at the handler's entry point). Instead, it runs away until
the next kernel entry (next syscall, tick, etc).
This test behaves correctly on 32-bit kernels, and fails on 64-bit
(either 32-bit or 64-bit test binary). With this fix, it works.
#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <string.h>
#include <sys/ucontext.h>
#ifndef REG_RIP
#define REG_RIP REG_EIP
#endif
static sig_atomic_t hit1, hit2;
static void
handler (int sig, siginfo_t *info, void *ctx)
{
ucontext_t *uc = ctx;
if ((void *) uc->uc_mcontext.gregs[REG_RIP] == &handler)
{
if (sig == SIGUSR1)
hit1 = 1;
else
hit2 = 1;
}
printf ("%s at %#lx\n", strsignal (sig),
uc->uc_mcontext.gregs[REG_RIP]);
}
int
main (void)
{
struct sigaction sa;
sigset_t set;
sigemptyset (&sa.sa_mask);
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = &handler;
if (sigaction (SIGUSR1, &sa, NULL)
|| sigaction (SIGUSR2, &sa, NULL))
return 2;
sigemptyset (&set);
sigaddset (&set, SIGUSR1);
sigaddset (&set, SIGUSR2);
if (sigprocmask (SIG_BLOCK, &set, NULL))
return 3;
printf ("main at %p, handler at %p\n", &main, &handler);
raise (SIGUSR1);
raise (SIGUSR2);
if (sigprocmask (SIG_UNBLOCK, &set, NULL))
return 4;
if (hit1 + hit2 == 1)
{
puts ("PASS");
return 0;
}
puts ("FAIL");
return 1;
}
Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-07-11 05:50:39 +08:00
|
|
|
jmp retint_with_reschedule
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
|
/* Returning to kernel space. Check if we need preemption */
|
|
|
|
/* rcx: threadinfo. interrupts off. */
|
2006-09-26 16:52:29 +08:00
|
|
|
ENTRY(retint_kernel)
|
2008-06-24 22:19:35 +08:00
|
|
|
cmpl $0,TI_preempt_count(%rcx)
|
2005-04-17 06:20:36 +08:00
|
|
|
jnz retint_restore_args
|
2008-06-24 22:19:35 +08:00
|
|
|
bt $TIF_NEED_RESCHED,TI_flags(%rcx)
|
2005-04-17 06:20:36 +08:00
|
|
|
jnc retint_restore_args
|
|
|
|
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
|
|
|
|
jnc retint_restore_args
|
|
|
|
call preempt_schedule_irq
|
|
|
|
jmp exit_intr
|
2008-11-16 22:29:00 +08:00
|
|
|
#endif
|
2006-06-26 19:56:55 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
CFI_ENDPROC
|
2006-06-26 19:56:55 +08:00
|
|
|
END(common_interrupt)
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* APIC interrupts.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2008-11-23 17:08:28 +08:00
|
|
|
.macro apicinterrupt num sym do_sym
/*
 * Define the entry stub \sym for APIC vector \num: push the
 * complemented vector number, enter the C handler \do_sym via the
 * 'interrupt' macro, and leave through ret_from_intr.
 */
|
|
|
|
ENTRY(\sym)
|
2005-09-13 00:49:24 +08:00
|
|
|
INTR_FRAME
|
2006-06-27 17:53:44 +08:00
|
|
|
pushq $~(\num)	/* complemented vector number */
|
2005-09-13 00:49:24 +08:00
|
|
|
CFI_ADJUST_CFA_OFFSET 8
|
2008-11-23 17:08:28 +08:00
|
|
|
interrupt \do_sym
|
2005-04-17 06:20:36 +08:00
|
|
|
jmp ret_from_intr
|
|
|
|
CFI_ENDPROC
|
2008-11-23 17:08:28 +08:00
|
|
|
END(\sym)
|
|
|
|
.endm
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
|
|
|
|
irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
|
x86: fix panic with interrupts off (needed for MCE)
For some time each panic() called with interrupts disabled
triggered the !irqs_disabled() WARN_ON in smp_call_function(),
producing ugly backtraces and confusing users.
This is a common situation with machine checks for example which
tend to call panic with interrupts disabled, but will also hit
in other situations e.g. panic during early boot. In fact it
means that panic cannot be called in many circumstances, which
would be bad.
This all started with the new fancy queued smp_call_function,
which is then used by the shutdown path to shut down the other
CPUs.
On closer examination it turned out that the fancy RCU
smp_call_function() does lots of things not suitable in a panic
situation anyways, like allocating memory and relying on complex
system state.
I originally tried to patch this over by checking for panic
there, but it was quite complicated and the original patch
was also not very popular. This also didn't fix some of the
underlying complexity problems.
The new code in post 2.6.29 tries to patch around this by
checking for oops_in_progress, but that is not enough to make
this fully safe and I don't think that's a real solution
because panic has to be reliable.
So instead use an own vector to reboot. This makes the reboot
code extremely straightforward, which is definitely a big plus
in a panic situation where it is important to avoid relying on
too much kernel state. The new simple code is also safe to be
called from an interrupts-off region because it is very, very simple.
There can be situations where it is important that panic
is reliable. For example on a fatal machine check the panic
is needed to get the system up again and running as quickly
as possible. So it's important that panic is reliable and
all function it calls simple.
This is why I came up with this simple vector scheme.
It's very hard to beat in simplicity. Vectors are not
particularly precious anymore since all big systems are
using per CPU vectors.
Another possibility would have been to use an NMI similar
to kdump, but there is still the problem that NMIs don't
work reliably on some systems due to BIOS issues. NMIs
would have been able to stop CPUs running with interrupts
off too. In the sake of universal reliability I opted for
using a non NMI vector for now.
I put the reboot vector into the highest priority bucket of
the APIC vectors and moved the 64bit UV_BAU message down
instead into the next lower priority.
[ Impact: bug fix, fixes an old regression ]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2009-05-28 03:56:52 +08:00
|
|
|
apicinterrupt REBOOT_VECTOR \
|
|
|
|
reboot_interrupt smp_reboot_interrupt
|
2008-11-23 17:08:28 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-20 11:36:04 +08:00
|
|
|
#ifdef CONFIG_X86_UV
|
2008-11-27 05:02:10 +08:00
|
|
|
apicinterrupt UV_BAU_MESSAGE \
|
2008-11-23 17:08:28 +08:00
|
|
|
uv_bau_message_intr1 uv_bau_message_interrupt
|
2009-01-20 11:36:04 +08:00
|
|
|
#endif
|
2008-11-23 17:08:28 +08:00
|
|
|
apicinterrupt LOCAL_TIMER_VECTOR \
|
|
|
|
apic_timer_interrupt smp_apic_timer_interrupt
|
2009-03-05 02:56:05 +08:00
|
|
|
apicinterrupt GENERIC_INTERRUPT_VECTOR \
|
|
|
|
generic_interrupt smp_generic_interrupt
|
2005-11-06 00:25:53 +08:00
|
|
|
|
2008-11-16 22:29:00 +08:00
|
|
|
#ifdef CONFIG_SMP
|
2008-11-23 17:08:28 +08:00
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
|
|
|
|
invalidate_interrupt0 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
|
|
|
|
invalidate_interrupt1 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
|
|
|
|
invalidate_interrupt2 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
|
|
|
|
invalidate_interrupt3 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
|
|
|
|
invalidate_interrupt4 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
|
|
|
|
invalidate_interrupt5 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
|
|
|
|
invalidate_interrupt6 smp_invalidate_interrupt
|
|
|
|
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
|
|
|
|
invalidate_interrupt7 smp_invalidate_interrupt
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
apicinterrupt THRESHOLD_APIC_VECTOR \
|
2009-04-29 05:32:56 +08:00
|
|
|
threshold_interrupt smp_threshold_interrupt
|
2008-11-23 17:08:28 +08:00
|
|
|
apicinterrupt THERMAL_APIC_VECTOR \
|
|
|
|
thermal_interrupt smp_thermal_interrupt
|
2008-06-02 21:56:14 +08:00
|
|
|
|
2009-05-28 03:56:54 +08:00
|
|
|
#ifdef CONFIG_X86_MCE
|
|
|
|
apicinterrupt MCE_SELF_VECTOR \
|
|
|
|
mce_self_interrupt smp_mce_self_interrupt
|
|
|
|
#endif
|
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
#ifdef CONFIG_SMP
/* Cross-CPU function-call and reschedule IPI stubs. */
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
/* Local APIC error and spurious interrupt stubs. */
apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt
|
2008-11-16 22:29:00 +08:00
|
|
|
|
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
sed -i \
-e 's/PERF_EVENT_/PERF_RECORD_/g' \
-e 's/PERF_COUNTER/PERF_EVENT/g' \
-e 's/perf_counter/perf_event/g' \
-e 's/nb_counters/nb_events/g' \
-e 's/swcounter/swevent/g' \
-e 's/tpcounter_event/tp_event/g' \
$FILES
for N in $(find . -name perf_counter.[ch]); do
M=$(echo $N | sed 's/perf_counter/perf_event/g')
mv $N $M
done
FILES=$(find . -name perf_event.*)
sed -i \
-e 's/COUNTER_MASK/REG_MASK/g' \
-e 's/COUNTER/EVENT/g' \
-e 's/\<event\>/event_id/g' \
-e 's/counter/event/g' \
-e 's/Counter/Event/g' \
$FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 18:02:48 +08:00
|
|
|
#ifdef CONFIG_PERF_EVENTS
/* Self-IPI vector used by perf events for pending work. */
apicinterrupt LOCAL_PENDING_VECTOR \
	perf_pending_interrupt smp_perf_pending_interrupt
#endif
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Exception entry points.
|
2008-11-16 22:29:00 +08:00
|
|
|
*/
|
2008-11-23 17:08:28 +08:00
|
|
|
/*
 * zeroentry: entry stub for exceptions that do NOT push a hardware
 * error code.  Pushes a fake ORIG_RAX (-1, "no syscall to restart"),
 * allocates a 15*8-byte register frame, lets the out-of-line
 * error_entry helper finish saving registers, then calls the C
 * handler as \do_sym(pt_regs *, 0).
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp		/* space for the saved register frame */
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry	/* out-of-line register save (defined elsewhere) */
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
/*
 * paranoidzeroentry: like zeroentry (no hardware error code), but uses
 * save_paranoid instead of error_entry and exits through paranoid_exit.
 * The swapgs decision is reported in %ebx (see the jmp below) —
 * presumably because these exceptions can arrive with either user or
 * kernel GS active; confirm against save_paranoid's definition.
 */
.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp	/* space for the saved register frame */
	call save_paranoid	/* register save for "paranoid" entries (defined elsewhere) */
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
|
2008-11-21 23:44:28 +08:00
|
|
|
|
2008-11-23 17:08:28 +08:00
|
|
|
/*
 * paranoidzeroentry_ist: paranoidzeroentry variant for handlers that
 * run on an IST stack (\ist is the 1-based IST index).  Around the C
 * call it moves the TSS IST pointer down by EXCEPTION_STKSZ so that a
 * nested exception of the same type lands below the current frame
 * instead of clobbering it, and restores the pointer afterwards.
 */
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp	/* space for the saved register frame */
	call save_paranoid	/* register save for "paranoid" entries (defined elsewhere) */
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	PER_CPU(init_tss, %rbp)	/* %rbp = this CPU's TSS */
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)	/* shift IST stack for nesting */
	call \do_sym
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)	/* restore IST stack pointer */
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
|
2008-11-21 23:44:28 +08:00
|
|
|
|
2008-11-24 20:24:28 +08:00
|
|
|
/*
 * errorentry: entry stub for exceptions that DO push a hardware error
 * code.  The error code occupies the ORIG_RAX slot of the frame; it is
 * loaded into %rsi as the second C argument and then overwritten with
 * -1 ("no syscall to restart").  Calls \do_sym(pt_regs *, error_code).
 */
.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp		/* space for the saved register frame */
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry	/* out-of-line register save (defined elsewhere) */
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)	/* no syscall to restart */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
	/* error code is on the stack already */
/*
 * paranoiderrorentry: "paranoid" variant of errorentry — the hardware
 * pushed an error code, registers are saved via save_paranoid, and the
 * exit path is paranoid_exit (swapgs decision in %ebx, see below).
 * Calls \do_sym(pt_regs *, error_code).
 */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp		/* space for the saved register frame */
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid	/* register save for "paranoid" entries (defined elsewhere) */
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)	/* no syscall to restart */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
|
|
|
|
|
|
|
|
/*
 * Instantiate the exception entry stubs.  zeroentry = no hardware
 * error code; errorentry = hardware error code on the stack;
 * paranoiderrorentry = error code + paranoid (save_paranoid) entry.
 */
zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error
|
2006-07-03 15:24:45 +08:00
|
|
|
|
2008-11-28 02:10:08 +08:00
|
|
|
/* Reload gs selector with exception handling */
/* edi: new selector */
/*
 * native_load_gs_index: load %gs with a new selector while kernel GS
 * base is swapped out, with interrupts disabled so nothing observes
 * the window where user GS base is active.  If the selector load
 * faults, the __ex_table entry below redirects to bad_gs, which loads
 * the null selector instead and resumes at label 2.
 */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf			/* preserve caller's IF */
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS			/* expose user GS base for the selector load */
gs_change:
	movl %edi,%gs		/* may fault; fixed up via __ex_table -> bad_gs */
2:	mfence		/* workaround */
	SWAPGS			/* back to kernel GS base */
	popf			/* restore interrupt state */
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
END(native_load_gs_index)

	/* Exception-table entry: a fault at gs_change resumes at bad_gs. */
	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS		/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs		/* fall back to the null selector */
	jmp 2b			/* resume after the faulting load */
	.previous
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Create a kernel thread.
 *
 * C extern interface:
 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 * rdi: fn, rsi: arg, rdx: flags
 *
 * Builds a fake pt_regs frame whose return address is child_rip, then
 * calls do_fork; the child starts executing in child_rip with the
 * parent's register values (fn in %rdi, arg in %rsi).
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip	/* child will "return" into child_rip */
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi			/* flags -> first do_fork arg */
	orq  kernel_thread_flags(%rip),%rdi	/* add the default kernel-thread clone flags */
	movq $-1, %rsi			/* usp = -1: child keeps kernel stack */
	movq %rsp, %rdx			/* &pt_regs for do_fork */

	xorl %r8d,%r8d			/* no parent/child TID pointers */
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)		/* return child PID (or error) to caller */
	xorl %edi,%edi

	/*
	 * It isn't worth to check for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning, this avoids the need
	 * of hacks for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_thread)
|
2008-11-16 22:29:00 +08:00
|
|
|
|
2008-11-27 03:17:00 +08:00
|
|
|
/*
 * child_rip: first code executed by a thread created via
 * kernel_thread().  Calls fn(arg) and then do_exit() with fn's return
 * value — it never returns to its caller.
 */
ENTRY(child_rip)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax		/* %rax = fn */
	movq %rsi, %rdi		/* first argument = arg */
	call *%rax		/* fn(arg) */
	# exit
	mov %eax, %edi		/* exit code = fn's return value */
	call do_exit		/* does not return */
	ud2 # padding for call trace
	CFI_ENDPROC
END(child_rip)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 * extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 * rdi: name, rsi: argv, rdx: envp
 *
 * We want to fallback into:
 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx			/* 4th arg: pt_regs of the fake frame */
	call sys_execve
	movq %rax, RAX(%rsp)		/* stash return value in the frame */
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call	/* success: leave via the IRET path */
	/* failure: undo the fake frame and return the error normally */
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_execve)
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-08-03 04:37:28 +08:00
|
|
|
/* Call softirq on interrupt stack. Interrupts are off. */
/*
 * call_softirq: run __do_softirq on this CPU's irq stack.  irq_count
 * tracks irq-stack nesting; the cmove switches %rsp to the irq stack
 * only when the incl result was zero (i.e. we were not already on it).
 */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp			/* save old stack pointer base */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl PER_CPU_VAR(irq_count)	/* sets ZF when this is the outermost entry */
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp	/* switch to irq stack if not nested */
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq				/* back to the original stack via %rbp */
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl PER_CPU_VAR(irq_count)
	ret
	CFI_ENDPROC
END(call_softirq)
|
2007-06-23 08:29:25 +08:00
|
|
|
|
2008-07-09 06:06:49 +08:00
|
|
|
#ifdef CONFIG_XEN
/*
 * Xen hypervisor event upcall entry: the zeroentry macro (defined elsewhere
 * in this file) builds an exception-style frame with a zero error code and
 * dispatches to xen_do_hypervisor_callback below.
 */
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
|
2008-07-09 06:06:49 +08:00
|
|
|
|
|
|
|
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/*
 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC			/* frame changed completely: restart CFI */
	DEFAULT_FRAME
11:	incl PER_CPU_VAR(irq_count)	/* ZF set iff the new count is 0 */
	movq %rsp,%rbp			/* remember current stack for the popq below */
	CFI_DEF_CFA_REGISTER rbp
	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp	/* non-nested: switch to irq stack */
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp			/* back onto the pt_regs stack saved above */
	CFI_DEF_CFA_REGISTER rsp
	decl PER_CPU_VAR(irq_count)
	jmp  error_exit
	CFI_ENDPROC
/*
 * Fix: ENTRY above defines xen_do_hypervisor_callback; the previous
 * END(do_hypervisor_callback) emitted a .size directive for a symbol that
 * does not exist.  END must name the same symbol as ENTRY.
 */
END(xen_do_hypervisor_callback)
|
|
|
|
|
|
|
|
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)		/* 6-slot frame: rcx, r11, then the segments */
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)		/* compare live %ds with its saved slot */
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)		/* saved %es */
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)		/* saved %fs */
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)		/* saved %gs */
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx		/* recover the original rcx/r11 */
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp			/* drop the 6*8-byte failsafe frame */
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection		/* per the comment above: this path kills the process */
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp			/* same frame teardown as above */
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0			/* fake error code / orig_rax slot */
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)
|
|
|
|
|
|
|
|
#endif /* CONFIG_XEN */
|
2008-11-24 20:24:28 +08:00
|
|
|
|
|
|
|
/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

	/* debug/int3 use DEBUG_STACK — presumably a dedicated IST stack; see macro */
	paranoidzeroentry_ist debug do_debug DEBUG_STACK
	paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
	paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
	/* Xen variants: same C handlers via the non-paranoid entry macros */
	zeroentry xen_debug do_debug
	zeroentry xen_int3 do_int3
	errorentry xen_stack_segment do_stack_segment
#endif
	errorentry general_protection do_general_protection
	errorentry page_fault do_page_fault
#ifdef CONFIG_X86_MCE
	/* indirect target: handler is read from machine_check_vector at runtime */
	paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif
|
|
|
|
|
|
|
|
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */

/* ebx: no swapgs flag */
ENTRY(paranoid_exit)
	INTR_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore			/* ebx != 0: no swapgs on the way out */
	testl $3,CS(%rsp)			/* CPL bits of the saved CS */
	jnz paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK			/* swap back to the user GS base */
	RESTORE_ALL 8				/* 8: extra slot to drop — presumably error code/orig_rax */
	jmp irq_return
paranoid_restore:
	TRACE_IRQS_IRETQ 0
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx		/* any pending work bits? */
	jz paranoid_swapgs			/* none: go back to userspace */
	movq %rsp,%rdi				/* &pt_regs */
	call sync_regs
	movq %rax,%rsp				/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx				/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 				/* arg2: oldset */
	movq %rsp,%rdi 				/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace			/* loop until no work flags remain */
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace			/* re-check flags after scheduling */
	CFI_ENDPROC
END(paranoid_exit)
|
|
|
|
|
|
|
|
/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * returns in "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	/*
	 * Spill the full register set into the pt_regs frame; the +8 on each
	 * offset accounts for our own return address (this routine is call'ed).
	 */
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx			/* flag = 0: swapgs needed (came from user) */
	testl $3,CS+8(%rsp)		/* CPL of the interrupted context */
	je error_kernelspace		/* kernel CPL: usually no swapgs — see below */
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx			/* flag = 1: assume no swapgs needed... */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)		/* ...unless we faulted on the iret itself */
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP+8(%rsp)		/* truncated-RIP variant (B stepping K8) */
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)	/* or while reloading GS in gs_change */
	je error_swapgs
	jmp error_sti
END(error_entry)
|
|
|
|
|
|
|
|
|
|
|
|
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax			/* keep the flag: RESTORE_REST reloads rbx */
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel		/* flag set => interrupted kernel context */
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx			/* pending work before returning to user? */
	jnz retint_careful
	jmp retint_swapgs		/* nothing pending: fast user return */
	CFI_ENDPROC
END(error_exit)
|
|
|
|
|
|
|
|
|
|
|
|
/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1			/* fake error code / orig_rax slot */
	subq $15*8, %rsp		/* room for the rest of pt_regs */
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	movq $-1,%rsi			/* arg2: error code (-1 for NMI) */
	call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
	/* paranoidexit; without TRACE_IRQS_OFF */
	/* ebx: no swapgs flag */
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl %ebx,%ebx			/* swapgs needed? */
	jnz nmi_restore
	testl $3,CS(%rsp)		/* CPL bits of saved CS */
	jnz nmi_userspace
nmi_swapgs:
	SWAPGS_UNSAFE_STACK		/* back to the user GS base */
nmi_restore:
	RESTORE_ALL 8
	jmp irq_return
nmi_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx	/* any pending work bits? */
	jz nmi_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz nmi_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi 			/* arg2: oldset */
	movq %rsp,%rdi 			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp nmi_userspace		/* loop until no work flags remain */
nmi_schedule:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp nmi_userspace		/* re-check flags after scheduling */
	CFI_ENDPROC
#else
	jmp paranoid_exit		/* tracing off: shared paranoid exit path */
	CFI_ENDPROC
#endif
END(nmi)
|
|
|
|
|
|
|
|
/*
 * Minimal stub: fail the call with -ENOSYS and sysret straight back.
 * (Where this entry point is installed is set up elsewhere — not visible
 * in this file section.)
 */
ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax		/* return value: not implemented */
	sysret
	CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
	.popsection
|