1dde7415e9
Simplify it to call an asm-function instead of pasting 41 insn bytes at
every call site. Also, add alignment to the macro as suggested here:

  https://support.google.com/faqs/answer/7625886

[dwmw2: Clean up comments, let it clobber %ebx and just tell the compiler]

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ak@linux.intel.com
Cc: dave.hansen@intel.com
Cc: karahmed@amazon.de
Cc: arjan@linux.intel.com
Cc: torvalds@linux-foundation.org
Cc: peterz@infradead.org
Cc: bp@alien8.de
Cc: pbonzini@redhat.com
Cc: tim.c.chen@linux.intel.com
Cc: gregkh@linux-foundation.org
Link: https://lkml.kernel.org/r/1517070274-12128-3-git-send-email-dwmw@amazon.co.uk
105 lines
2.6 KiB
x86 assembly
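
The commit above stops pasting the ~41-byte RSB-stuffing sequence at every
call site and instead emits a single call to the exported asm helpers defined
at the bottom of this file, simply telling the compiler about the %ebx/%rbx
clobber. Below is a minimal sketch of what such a C-side caller can look
like; the function name is hypothetical, and alternative_input() /
ASM_NO_INPUT_CLOBBER() are the kernel's alternatives-patching macros, so
exact call sites may differ:

#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/cpufeatures.h>

/*
 * Sketch only: when retpoline is enabled, emit one "call __fill_rsb"
 * and declare the register clobber instead of open-coding the loop.
 */
static inline void fill_rsb_example(void)
{
	alternative_input("",
			  "call __fill_rsb",
			  X86_FEATURE_RETPOLINE,
			  ASM_NO_INPUT_CLOBBER(_ASM_BX));
}
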
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/bitsperlong.h>

.macro THUNK reg
	.section .text.__x86.indirect_thunk

ENTRY(__x86_indirect_thunk_\reg)
	CFI_STARTPROC
	JMP_NOSPEC %\reg
	CFI_ENDPROC
ENDPROC(__x86_indirect_thunk_\reg)
.endm

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 */
#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
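
/*
 * For example, on a 64-bit build GENERATE_THUNK(_ASM_AX) expands, via
 * the C preprocessor plus the THUNK assembler macro above, into the
 * exported, non-kprobable thunk __x86_indirect_thunk_rax whose body is
 * just JMP_NOSPEC %rax.
 */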
GENERATE_THUNK(_ASM_AX)
GENERATE_THUNK(_ASM_BX)
GENERATE_THUNK(_ASM_CX)
GENERATE_THUNK(_ASM_DX)
GENERATE_THUNK(_ASM_SI)
GENERATE_THUNK(_ASM_DI)
GENERATE_THUNK(_ASM_BP)
#ifdef CONFIG_64BIT
GENERATE_THUNK(r8)
GENERATE_THUNK(r9)
GENERATE_THUNK(r10)
GENERATE_THUNK(r11)
GENERATE_THUNK(r12)
GENERATE_THUNK(r13)
GENERATE_THUNK(r14)
GENERATE_THUNK(r15)
#endif
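
/*
 * Compiler-generated code reaches these thunks when the kernel is
 * built with a retpoline-aware compiler (e.g. GCC's
 * -mindirect-branch=thunk-extern), which emits indirect calls and
 * jumps as calls/jumps to __x86_indirect_thunk_<reg>.  Assembly and
 * inline-asm call sites use the CALL_NOSPEC/JMP_NOSPEC macros from
 * <asm/nospec-branch.h> instead.
 */
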
/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
.macro STUFF_RSB nr:req sp:req
	mov	$(\nr / 2), %_ASM_BX
	.align 16
771:
	call	772f
773:	/* speculation trap */
	pause
	lfence
	jmp	773b
	.align 16
772:
	call	774f
775:	/* speculation trap */
	pause
	lfence
	jmp	775b
	.align 16
774:
	dec	%_ASM_BX
	jnz	771b
	add	$((BITS_PER_LONG/8) * \nr), \sp
.endm
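
/*
 * For example, __clear_rsb below uses \nr = 32: the loop makes 32
 * calls (16 iterations, two calls each), overwriting up to 32 RSB
 * entries, and the final add pops the 32 * (BITS_PER_LONG/8) bytes of
 * return addresses (256 bytes on 64-bit) back off the stack pointer
 * passed in as \sp.
 */
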
#define RSB_FILL_LOOPS 16 /* To avoid underflow */

ENTRY(__fill_rsb)
	STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
	ret
END(__fill_rsb)
EXPORT_SYMBOL_GPL(__fill_rsb)

#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */

ENTRY(__clear_rsb)
	STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
	ret
END(__clear_rsb)
EXPORT_SYMBOL_GPL(__clear_rsb)
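
To make the thunk path concrete as well, here is a small illustrative C
fragment (not part of this file; run_probe() and probe_fn_t are invented
names). When the kernel is built with CONFIG_RETPOLINE and a compiler that
supports -mindirect-branch=thunk-extern, an ordinary indirect call like this
is redirected through the __x86_indirect_thunk_<reg> symbols generated above:

typedef int (*probe_fn_t)(int);

static int run_probe(probe_fn_t fn, int arg)
{
	/*
	 * Without retpoline this compiles to "call *%rax" (or whichever
	 * register holds fn); with -mindirect-branch=thunk-extern the
	 * compiler emits "call __x86_indirect_thunk_rax" instead, so any
	 * speculative target is the thunk's capture loop rather than
	 * whatever the indirect branch predictor supplies.
	 */
	return fn(arg);
}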