mirror of https://mirrors.bfsu.edu.cn/git/linux.git
commit 4da9f33026
Merge tag 'x86-fsgsbase-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fsgsbase from Thomas Gleixner:
 "Support for FSGSBASE. Almost 5 years after the first RFC to support
  it, this has been brought into a shape which is maintainable and
  actually works. This final version was done by Sasha Levin who took
  it up after Intel dropped the ball. Sasha discovered that the SGX
  (sic!) offerings out there ship rogue kernel modules enabling
  FSGSBASE behind the kernel's back, which opens an instantaneous
  unprivileged root hole.

  The FSGSBASE instructions provide a considerable speedup of the
  context switch path and enable user space to write GSBASE without
  kernel interaction. This enablement requires careful handling of the
  exception entries which go through the paranoid entry path, as they
  can no longer rely on the assumption that user GSBASE is positive (as
  enforced via prctl() on non-FSGSBASE enabled systems).

  All other entries (syscalls, interrupts and exceptions) can still
  just utilize SWAPGS unconditionally when the entry comes from user
  space. Converting these entries to use FSGSBASE has no benefit, as
  SWAPGS is only marginally slower than WRGSBASE and locating and
  retrieving the kernel GSBASE value is not a free operation either.
  The real benefit of RD/WRGSBASE is the avoidance of the MSR reads
  and writes.

  The changes come with appropriate selftests and have held up in field
  testing against the (sanitized) Graphene-SGX driver"

* tag 'x86-fsgsbase-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  x86/fsgsbase: Fix Xen PV support
  x86/ptrace: Fix 32-bit PTRACE_SETREGS vs fsbase and gsbase
  selftests/x86/fsgsbase: Add a missing memory constraint
  selftests/x86/fsgsbase: Fix a comment in the ptrace_write_gsbase test
  selftests/x86: Add a syscall_arg_fault_64 test for negative GSBASE
  selftests/x86/fsgsbase: Test ptracer-induced GS base write with FSGSBASE
  selftests/x86/fsgsbase: Test GS selector on ptracer-induced GS base write
  Documentation/x86/64: Add documentation for GS/FS addressing mode
  x86/elf: Enumerate kernel FSGSBASE capability in AT_HWCAP2
  x86/cpu: Enable FSGSBASE on 64bit by default and add a chicken bit
  x86/entry/64: Handle FSGSBASE enabled paranoid entry/exit
  x86/entry/64: Introduce the FIND_PERCPU_BASE macro
  x86/entry/64: Switch CR3 before SWAPGS in paranoid entry
  x86/speculation/swapgs: Check FSGSBASE in enabling SWAPGS mitigation
  x86/process/64: Use FSGSBASE instructions on thread copy and ptrace
  x86/process/64: Use FSBSBASE in switch_to() if available
  x86/process/64: Make save_fsgs_for_kvm() ready for FSGSBASE
  x86/fsgsbase/64: Enable FSGSBASE instructions in helper functions
  x86/fsgsbase/64: Add intrinsics for FSGSBASE instructions
  x86/cpu: Add 'unsafe_fsgsbase' to enable CR4.FSGSBASE
  ...
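For context, the user-space side of the new capability is small. Below is a minimal sketch, not code from this series; it assumes glibc's getauxval(), the HWCAP2_FSGSBASE bit that the AT_HWCAP2 enumeration commit above adds in asm/hwcap2.h, and the compiler's FSGSBASE intrinsics from <immintrin.h>, built with -mfsgsbase:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/auxv.h>      /* getauxval(), AT_HWCAP2 */
    #include <immintrin.h>     /* _readgsbase_u64(), _writegsbase_u64() */

    #ifndef HWCAP2_FSGSBASE
    #define HWCAP2_FSGSBASE (1 << 1)   /* assumed value, see asm/hwcap2.h */
    #endif

    int main(void)
    {
            /* Only use the instructions if the kernel says it enabled them. */
            if (!(getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE)) {
                    puts("kernel does not expose FSGSBASE");
                    return 1;
            }

            uint64_t base = _readgsbase_u64();   /* RDGSBASE, no kernel involved */
            _writegsbase_u64(base);              /* WRGSBASE, no MSR round trip  */
            printf("GSBASE = %#llx\n", (unsigned long long)base);
            return 0;
    }

Before this series the only supported way for user space to set GSBASE was arch_prctl(ARCH_SET_GS), i.e. exactly the kernel/MSR round trip that RD/WRGSBASE avoid.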
1403 lines
39 KiB
x86-64 assembly
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET.  That is
 * because IRET deals with non-canonical addresses better; SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
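/*
 * For illustration only (not part of the kernel): a minimal user-space
 * instruction sequence that reaches this entry point, showing the
 * register mapping described above.  The buffer label and length are
 * made-up example values.
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd
 *	leaq	msg(%rip), %rsi		# arg1: buf
 *	movq	$14, %rdx		# arg2: count
 *	syscall				# rcx := rip, r11 := rflags,
 *					# rax := return value or -errno
 */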

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
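	/*
	 * Worked example (illustrative, assuming 4-level paging and thus
	 * 48-bit virtual addresses): shifting left and then arithmetically
	 * right by 16 sign-extends bit 47 into bits 63:48.
	 *
	 *   canonical:     RCX = 0x00007fffffffe000 -> unchanged, still == R11
	 *   non-canonical: RCX = 0x0000800000000000 -> 0xffff800000000000,
	 *                  which no longer matches R11, so the check below
	 *                  falls back to the IRET path.
	 */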

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument*/

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
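/*
 * Usage sketch (for illustration; the real instantiations are generated
 * via the DECLARE_IDTENTRY* macros in asm/idtentry.h, included further
 * down): a simple exception without an error code and one with an error
 * code would be emitted roughly as
 *
 *	idtentry X86_TRAP_DE  asm_exc_divide_error        exc_divide_error        has_error_code=0
 *	idtentry X86_TRAP_GP  asm_exc_general_protection  exc_general_protection  has_error_code=1
 */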

/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm

/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
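	/*
	 * Worked example with an illustrative user RSP of 0x00007f1234568000:
	 * the andl above leaves RAX = 0x34560000, and the orq below produces
	 * a stack pointer whose bits 31:16 are 0x3456 -- the same bits the
	 * userspace stack pointer had -- while still pointing into one of the
	 * 65536 read-only aliases of the ESPFIX page.
	 */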
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt_return)
_ASM_NOKPROBE(common_interrupt_return)

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
	swapgs					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
SYM_CODE_END(.Lbad_gs)
	.previous

/*
 * rdi: New stack pointer, pointing to the top word of the stack
 * rsi: Function pointer
 * rdx: Function argument (can be NULL if none)
 */
SYM_FUNC_START(asm_call_on_stack)
	/*
	 * Save the frame pointer unconditionally. This allows the ORC
	 * unwinder to handle the stack switch.
	 */
	pushq	%rbp
	mov	%rsp, %rbp

	/*
	 * The unwinder relies on the word at the top of the new stack
	 * page linking back to the previous RSP.
	 */
	mov	%rsp, (%rdi)
	mov	%rdi, %rsp
	/* Move the argument to the right place */
	mov	%rdx, %rdi

1:
	.pushsection .discard.instr_begin
	.long 1b - .
	.popsection

	CALL_NOSPEC	rsi

2:
	.pushsection .discard.instr_end
	.long 2b - .
	.popsection

	/* Restore the previous stack pointer from RBP. */
	leaveq
	ret
SYM_FUNC_END(asm_call_on_stack)
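/*
 * Calling-convention sketch (illustrative, not an actual kernel call
 * site): a C caller that wants to run a hypothetical handler
 * my_func(arg) on a separate stack whose highest usable word is at
 * stack_top would issue
 *
 *	asm_call_on_stack(stack_top, my_func, arg);
 *
 * i.e. rdi = new stack top, rsi = function pointer, rdx = its single
 * argument, matching the register roles documented above.
 */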

#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct *pt_regs)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
SYM_CODE_START(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	asm_exc_general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	PUSH_AND_CLEAR_REGS
	ENCODE_FRAME_POINTER
	jmp	error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */

/*
 * Save all registers in pt_regs. Return GSBASE related information
 * in EBX depending on the availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        GSBASE value at entry, must be restored in paranoid_exit
 */
SYM_CODE_START_LOCAL(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	/*
	 * Always stash CR3 in %r14.  This value will be restored,
	 * verbatim, at exit.  Needed if paranoid_entry interrupted
	 * another entry that already switched to the user CR3 value
	 * but has not yet returned to userspace.
	 *
	 * This is also why CS (stashed in the "iret frame" by the
	 * hardware at entry) can not be used: this may be a return
	 * to kernel code, but with a user CR3 value.
	 *
	 * Switching CR3 does not depend on kernel GSBASE so it can
	 * be done before switching to the kernel GSBASE. This is
	 * required for FSGSBASE because the kernel GSBASE has to
	 * be retrieved from a kernel internal table.
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * Handling GSBASE depends on the availability of FSGSBASE.
	 *
	 * Without FSGSBASE the kernel enforces that negative GSBASE
	 * values indicate kernel GSBASE. With FSGSBASE no assumptions
	 * can be made about the GSBASE value when entering from user
	 * space.
	 */
	ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE

	/*
	 * Read the current GSBASE and store it in %rbx unconditionally,
	 * retrieve and set the current CPU's kernel GSBASE. The stored value
	 * has to be restored in paranoid_exit unconditionally.
	 *
	 * The MSR write ensures that no subsequent load is based on a
	 * mispredicted GSBASE. No extra FENCE required.
	 */
	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
	ret

.Lparanoid_entry_checkgs:
	/* EBX = 1 -> kernel GSBASE active, no restore required */
	movl	$1, %ebx
	/*
	 * The kernel-enforced convention is a negative GSBASE indicates
	 * a kernel value. No SWAPGS needed on entry and exit.
	 */
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	jns	.Lparanoid_entry_swapgs
	ret

.Lparanoid_entry_swapgs:
	SWAPGS

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case.  So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	/* EBX = 0 -> SWAPGS required on exit */
	xorl	%ebx, %ebx
	ret
SYM_CODE_END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * R/EBX contains the GSBASE related information depending on the
 * availability of the FSGSBASE instructions:
 *
 * FSGSBASE	R/EBX
 *     N        0 -> SWAPGS on exit
 *              1 -> no SWAPGS on exit
 *
 *     Y        User space GSBASE, must be restored unconditionally
 */
SYM_CODE_START_LOCAL(paranoid_exit)
	UNWIND_HINT_REGS
	/*
	 * The order of operations is important. RESTORE_CR3 requires
	 * kernel GSBASE.
	 *
	 * NB to anyone trying to optimize this code: this code does
	 * not execute at all for exceptions from user mode. Those
	 * exceptions go through error_exit instead.
	 */
	RESTORE_CR3	scratch_reg=%rax save_reg=%r14

	/* Handle the three GSBASE cases */
	ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE

	/* With FSGSBASE enabled, unconditionally restore GSBASE */
	wrgsbase	%rbx
	jmp	restore_regs_and_return_to_kernel

.Lparanoid_exit_checkgs:
	/* On non-FSGSBASE systems, conditionally do SWAPGS */
	testl	%ebx, %ebx
	jnz	restore_regs_and_return_to_kernel

	/* We are returning to a context with user GSBASE */
	SWAPGS_UNSAFE_STACK
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch GS if needed.
 */
SYM_CODE_START_LOCAL(error_entry)
	UNWIND_HINT_FUNC
	cld
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12
	ret

.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	jmp .Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	jmp	.Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)

SYM_CODE_START_LOCAL(error_return)
	UNWIND_HINT_REGS
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	testb	$3, CS(%rsp)
	jz	restore_regs_and_return_to_kernel
	jmp	swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(error_return)

/*
 * Runs on exception stack.  Xen PV does not go through this path at all,
 * so we can use real assembly here.
 *
 * Registers:
 *	%r14: Used to save/restore the CR3 of the interrupted context
 *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
 */
SYM_CODE_START(asm_exc_nmi)
	UNWIND_HINT_IRET_REGS

	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *  Check a special location on the stack that contains
	 *  a variable that is set when NMIs are executing.
	 *  The interrupted task's stack is also checked to see if it
	 *  is an NMI stack.
	 *  If the variable is not set and the stack is not the NMI
	 *  stack then:
	 *    o Set the special variable on the stack
	 *    o Copy the interrupt frame into an "outermost" location on the
	 *      stack
	 *    o Copy the interrupt frame into an "iret" location on the stack
	 *    o Continue processing the NMI
	 *  If the variable is set or the previous stack is the NMI stack:
	 *    o Modify the "iret" location to jump to the repeat_nmi
	 *    o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 *
	 * However, espfix prevents us from directly returning to userspace
	 * with a single IRET instruction.  Similarly, IRET to user mode
	 * can fault.  We therefore handle NMIs from user space like
	 * other IST entries.
	 */

	ASM_CLAC

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	testb	$3, CS-RIP+8(%rsp)
	jz	.Lnmi_from_kernel

	/*
	 * NMI from user mode.  We need to run on the thread stack, but we
	 * can't go through the normal entry paths: NMIs are masked, and
	 * we don't want to enable interrupts, because then we'll end
	 * up in an awkward situation in which IRQs are on but NMIs
	 * are off.
	 *
	 * We also must not push anything to the stack before switching
	 * stacks lest we corrupt the "NMI executing" variable.
	 */

	swapgs
	cld
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
	pushq	3*8(%rdx)	/* pt_regs->flags */
	pushq	2*8(%rdx)	/* pt_regs->cs */
	pushq	1*8(%rdx)	/* pt_regs->rip */
	UNWIND_HINT_IRET_REGS
	pushq   $-1		/* pt_regs->orig_ax */
	PUSH_AND_CLEAR_REGS rdx=(%rdx)
	ENCODE_FRAME_POINTER

	/*
	 * At this point we no longer need to worry about stack damage
	 * due to nesting -- we're on the normal thread stack and we're
	 * done with the NMI stack.
	 */

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/*
	 * Return back to user mode.  We must *not* do the normal exit
	 * work, because we don't want to enable interrupts.
	 */
	jmp	swapgs_restore_regs_and_return_to_usermode

.Lnmi_from_kernel:
	/*
	 * Here's what our stack frame will look like:
	 * +---------------------------------------------------------+
	 * | original SS                                              |
	 * | original Return RSP                                      |
	 * | original RFLAGS                                          |
	 * | original CS                                              |
	 * | original RIP                                             |
	 * +---------------------------------------------------------+
	 * | temp storage for rdx                                     |
	 * +---------------------------------------------------------+
	 * | "NMI executing" variable                                 |
	 * +---------------------------------------------------------+
	 * | iret SS          } Copied from "outermost" frame         |
	 * | iret Return RSP  } on each loop iteration; overwritten   |
	 * | iret RFLAGS      } by a nested NMI to force another      |
	 * | iret CS          } iteration if needed.                  |
	 * | iret RIP         }                                       |
	 * +---------------------------------------------------------+
	 * | outermost SS          } initialized in first_nmi;        |
	 * | outermost Return RSP  } will not be changed before       |
	 * | outermost RFLAGS      } NMI processing is done.          |
	 * | outermost CS          } Copied to "iret" frame on each   |
	 * | outermost RIP         } iteration.                       |
	 * +---------------------------------------------------------+
	 * | pt_regs                                                  |
	 * +---------------------------------------------------------+
	 *
	 * The "original" frame is used by hardware.  Before re-enabling
	 * NMIs, we need to be done with it, and we need to leave enough
	 * space for the asm code here.
	 *
	 * We return by executing IRET while RSP points to the "iret" frame.
	 * That will either return for real or it will loop back into NMI
	 * processing.
	 *
	 * The "outermost" frame is copied to the "iret" frame on each
	 * iteration of the loop, so each iteration starts with the "iret"
	 * frame pointing to the final return target.
	 */

	/*
	 * Determine whether we're a nested NMI.
	 *
	 * If we interrupted kernel code between repeat_nmi and
	 * end_repeat_nmi, then we are a nested NMI.  We must not
	 * modify the "iret" frame because it's being written by
	 * the outer NMI.  That's okay; the outer NMI handler is
	 * about to call exc_nmi() anyway, so we can just
	 * resume the outer NMI.
	 */

	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out
1:

	/*
	 * Now check "NMI executing".  If it's set, then we're nested.
	 * This will not detect if we interrupted an outer NMI just
	 * before IRET.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.  This covers
	 * the case where we interrupt an outer NMI after it clears
	 * "NMI executing" but before IRET.  We need to be careful, though:
	 * there is one case in which RSP could point to the NMI stack
	 * despite there being no NMI active: naughty userspace controls
	 * RSP at the very beginning of the SYSCALL targets.  We can
	 * pull a fast one on naughty userspace, though: we program
	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
	 * if it controls the kernel's RSP.  We set DF before we clear
	 * "NMI executing".
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi

	/* Ah, it is within the NMI stack. */
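	/*
	 * Decoding the check below: X86_EFLAGS_DF is bit 10 (0x0400), so
	 * (X86_EFLAGS_DF >> 8) == 0x04 is tested against the byte at offset
	 * 3*8 + 1, i.e. bits 15:8 of the RFLAGS image saved in the frame.
	 */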

	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
	jz	first_nmi	/* RSP was user controlled. */

	/* This is a nested NMI. */

nested_nmi:
	/*
	 * Modify the "iret" frame to point to repeat_nmi, forcing another
	 * iteration of NMI handling.
	 */
	subq	$8, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* We are returning to kernel mode, so this cannot result in a fault. */
	iretq

first_nmi:
	/* Restore rdx. */
	movq	(%rsp), %rdx

	/* Make room for "NMI executing". */
	pushq	$0

	/* Leave room for the "iret" frame */
	subq	$(5*8), %rsp

	/* Copy the "original" frame to the "outermost" frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS

	/* Everything up to here is safe from nested NMIs */

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * For ease of testing, unmask NMIs right away.  Disabled by
	 * default because IRET is very expensive.
	 */
	pushq	$0		/* SS */
	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
	addq	$8, (%rsp)	/* Fix up RSP */
	pushfq			/* RFLAGS */
	pushq	$__KERNEL_CS	/* CS */
	pushq	$1f		/* RIP */
	iretq			/* continues at repeat_nmi below */
	UNWIND_HINT_IRET_REGS
1:
#endif

repeat_nmi:
	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 *
	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
	 * we're repeating an NMI, gsbase has the same value that it had on
	 * the first iteration.  paranoid_entry will load the kernel
	 * gsbase if needed before we call exc_nmi().  "NMI executing"
	 * is zero.
	 */
	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */

	/*
	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
	 * here must not modify the "iret" frame while we're writing to
	 * it or it will end up containing garbage.
	 */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested NMI.
	 * If this happens, then the inner NMI will change the "iret"
	 * frame to point back to repeat_nmi.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context, even with
	 * normal interrupts enabled. An NMI should not be setting NEED_RESCHED
	 * or anything that normal interrupts and exceptions might do.
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	exc_nmi

	/* Always restore stashed CR3 value (see paranoid_entry) */
	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

	/*
	 * The above invocation of paranoid_entry stored the GSBASE
	 * related information in R/EBX depending on the availability
	 * of FSGSBASE.
	 *
	 * If FSGSBASE is enabled, restore the saved GSBASE value
	 * unconditionally, otherwise take the conditional SWAPGS path.
	 */
	ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE

	wrgsbase	%rbx
	jmp	nmi_restore

nmi_no_fsgsbase:
	/* EBX == 0 -> invoke SWAPGS */
	testl	%ebx, %ebx
	jnz	nmi_restore

nmi_swapgs:
	SWAPGS_UNSAFE_STACK

nmi_restore:
	POP_REGS

	/*
	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
	 * frame.
	 */
	addq	$6*8, %rsp

	/*
	 * Clear "NMI executing".  Set DF first so that we can easily
	 * distinguish the remaining code between here and IRET from
	 * the SYSCALL entry and exit paths.
	 *
	 * We arguably should just inspect RIP instead, but I (Andy) wrote
	 * this code when I had the misapprehension that Xen PV supported
	 * NMIs, and Xen PV would break that approach.
	 */
	std
	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */

	/*
	 * iretq reads the "iret" frame and exits the NMI stack in a
	 * single instruction.  We are returning to kernel mode, so this
	 * cannot result in a fault.  Similarly, we don't need to worry
	 * about espfix64 on the way back to kernel mode.
	 */
	iretq
SYM_CODE_END(asm_exc_nmi)

#ifndef CONFIG_IA32_EMULATION
/*
 * This handles SYSCALL from 32-bit code.  There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
SYM_CODE_START(ignore_sysret)
	UNWIND_HINT_EMPTY
	mov	$-ENOSYS, %eax
	sysretl
SYM_CODE_END(ignore_sysret)
#endif

.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
	UNWIND_HINT_FUNC
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS

	call	do_exit
SYM_CODE_END(rewind_stack_do_exit)
.popsection