mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-29 14:05:19 +08:00
10de638d8e
- Add support for stackleak feature. Also allow specifying architecture-specific stackleak poison function to enable faster implementation. On s390, the mvc-based implementation helps decrease typical overhead from a factor of 3 to just 25% - Convert all assembler files to use SYM* style macros, deprecating the ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS - Improve KASLR to also randomize module and special amode31 code base load addresses - Rework decompressor memory tracking to support memory holes and improve error handling - Add support for protected virtualization AP binding - Add support for set_direct_map() calls - Implement set_memory_rox() and noexec module_alloc() - Remove obsolete overriding of mem*() functions for KASAN - Rework kexec/kdump to avoid using nodat_stack to call purgatory - Convert the rest of the s390 code to use flexible-array member instead of a zero-length array - Clean up uaccess inline asm - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable DEBUG_FORCE_FUNCTION_ALIGN_64B - Resolve last_break in userspace fault reports - Simplify one-level sysctl registration - Clean up branch prediction handling - Rework CPU counter facility to retrieve available counter sets just once - Other various small fixes and improvements all over the code -----BEGIN PGP SIGNATURE----- iQEzBAABCAAdFiEE3QHqV+H2a8xAv27vjYWKoQLXFBgFAmRM8pwACgkQjYWKoQLX FBjV1AgAlvAhu1XkwOdwqdT4GqE8pcN4XXzydog1MYihrSO2PdgWAxpEW7o2QURN W+3xa6RIqt7nX2YBiwTanMZ12TYaFY7noGl3eUpD/NhueprweVirVl7VZUEuRoW/ j0mbx77xsVzLfuDFxkpVwE6/j+tTO78kLyjUHwcN9rFVUaL7/orJneDJf+V8fZG0 sHLOv0aljF7Jr2IIkw82lCmW/vdk7k0dACWMXK2kj1H3dIK34B9X4AdKDDf/WKXk /OSElBeZ93tSGEfNDRIda6iR52xocROaRnQAaDtargKFl9VO0/dN9ADxO+SLNHjN pFE/9VD6xT/xo4IuZZh/Z3TcYfiLvA== =Geqx -----END PGP SIGNATURE----- Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux Pull s390 updates from Vasily Gorbik: - Add support for stackleak feature. 
Also allow specifying architecture-specific stackleak poison function to enable faster implementation. On s390, the mvc-based implementation helps decrease typical overhead from a factor of 3 to just 25% - Convert all assembler files to use SYM* style macros, deprecating the ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS - Improve KASLR to also randomize module and special amode31 code base load addresses - Rework decompressor memory tracking to support memory holes and improve error handling - Add support for protected virtualization AP binding - Add support for set_direct_map() calls - Implement set_memory_rox() and noexec module_alloc() - Remove obsolete overriding of mem*() functions for KASAN - Rework kexec/kdump to avoid using nodat_stack to call purgatory - Convert the rest of the s390 code to use flexible-array member instead of a zero-length array - Clean up uaccess inline asm - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable DEBUG_FORCE_FUNCTION_ALIGN_64B - Resolve last_break in userspace fault reports - Simplify one-level sysctl registration - Clean up branch prediction handling - Rework CPU counter facility to retrieve available counter sets just once - Other various small fixes and improvements all over the code * tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits) s390/stackleak: provide fast __stackleak_poison() implementation stackleak: allow to specify arch specific stackleak poison function s390: select ARCH_USE_SYM_ANNOTATIONS s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc() s390: wire up memfd_secret system call s390/mm: enable ARCH_HAS_SET_DIRECT_MAP s390/mm: use BIT macro to generate SET_MEMORY bit masks s390/relocate_kernel: adjust indentation s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc. s390/entry: use SYM* macros instead of ENTRY(), etc. s390/purgatory: use SYM* macros instead of ENTRY(), etc. 
s390/kprobes: use SYM* macros instead of ENTRY(), etc. s390/reipl: use SYM* macros instead of ENTRY(), etc. s390/head64: use SYM* macros instead of ENTRY(), etc. s390/earlypgm: use SYM* macros instead of ENTRY(), etc. s390/mcount: use SYM* macros instead of ENTRY(), etc. s390/crc32le: use SYM* macros instead of ENTRY(), etc. s390/crc32be: use SYM* macros instead of ENTRY(), etc. s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc. s390/amode31: use SYM* macros instead of ENTRY(), etc. ...
187 lines
4.7 KiB
s390 assembly (GNU as)
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Copyright IBM Corp. 2008, 2009
|
|
*
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/ftrace.h>
|
|
#include <asm/nospec-insn.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/export.h>
|
|
|
|
|
|
/*
 * Stack layout used by the trampolines below: a standard stack frame
 * (STACK_FRAME_OVERHEAD) immediately followed by a struct pt_regs.
 * The STACK_PTREGS_* offsets address individual pt_regs members
 * relative to %r15 once such a frame has been allocated.
 */
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
|
|
|
|
#ifdef CONFIG_FUNCTION_TRACER

/*
 * Emit the (expoline) branch thunks used by the BR_EX/BASR_EX
 * indirect branches via %r1 and %r14 below (see asm/nospec-insn.h).
 */
	GEN_BR_THUNK %r1
	GEN_BR_THUNK %r14

	/* keep the ftrace trampolines out of reach of kprobes */
	.section .kprobes.text, "ax"
|
|
|
|
# Default no-op ftrace callback: immediately return to the caller.
SYM_FUNC_START(ftrace_stub)
	BR_EX	%r14
SYM_FUNC_END(ftrace_stub)
|
|
|
|
# Stub used for ftrace direct-call trampolines: branch to the address
# held in %r0, going via %r1 so the expoline thunk form can be used.
SYM_CODE_START(ftrace_stub_direct_tramp)
	lgr	%r1, %r0
	BR_EX	%r1
SYM_CODE_END(ftrace_stub_direct_tramp)
|
|
|
|
# Build the stack layout the ftrace callbacks expect: an intermediate
# packed frame for the traced function plus a frame containing a
# struct pt_regs.
# On entry: %r0 = address inside the traced function (stored as the
#           pt_regs PSW address), %r14 = return address to the traced
#           function's caller.
# \allregs=1 additionally captures the PSW mask and marks the pt_regs
# as complete via _PIF_FTRACE_FULL_REGS.
.macro ftrace_regs_entry, allregs=0
	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller

	.if \allregs == 1
	# save psw mask
	# don't put any instructions clobbering CC before this point
	epsw	%r1,%r14
	risbg	%r14,%r1,0,31,32		# %r14 = 64-bit PSW mask image
	.endif

	lgr	%r1,%r15			# %r1 = original stack pointer
	# allocate stack frame for ftrace_caller to contain traced function
	aghi	%r15,-TRACED_FUNC_FRAME_SIZE
	stg	%r1,__SF_BACKCHAIN(%r15)
	stg	%r0,(__SF_GPRS+8*8)(%r15)	# fake r14 slot: traced func ip
	stg	%r15,(__SF_GPRS+9*8)(%r15)
	# allocate pt_regs and stack frame for ftrace_trace_function
	aghi	%r15,-STACK_FRAME_SIZE
	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)	# pt_regs.gprs[15]
	xc	STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)

	.if \allregs == 1
	stg	%r14,(STACK_PTREGS_PSW)(%r15)	# pt_regs PSW mask
	mvghi	STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
	.else
	xc	STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
	.endif

	lg	%r14,(__SF_GPRS+8*8)(%r1)	# restore original return address
	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
	stg	%r1,__SF_BACKCHAIN(%r15)
	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)	# pt_regs PSW address
	stmg	%r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)	# pt_regs.gprs[2..14]
.endm
|
|
|
|
# Entry used when a full pt_regs is requested (allregs=1: PSW mask is
# captured and _PIF_FTRACE_FULL_REGS is set).
SYM_CODE_START(ftrace_regs_caller)
	ftrace_regs_entry	1
	j	ftrace_common
SYM_CODE_END(ftrace_regs_caller)
|
|
|
|
# Regular ftrace entry: builds the pt_regs layout without the PSW mask
# (allregs=0), then shares the common callback path.
SYM_CODE_START(ftrace_caller)
	ftrace_regs_entry	0
	j	ftrace_common
SYM_CODE_END(ftrace_caller)
|
|
|
|
# Common tail of ftrace_caller/ftrace_regs_caller: invoke the current
# ftrace callback as ftrace_func(ip, parent_ip, op, regs), optionally
# fall through to the graph-tracer hook, then restore registers and
# branch back into the traced code.
SYM_CODE_START(ftrace_common)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	aghik	%r2,%r0,-MCOUNT_INSN_SIZE	# %r2 = ip (%r0 - patch insn size)
	lgrl	%r4,function_trace_op		# %r4 = op
	lgrl	%r1,ftrace_func			# %r1 = callback to invoke
#else
	# pre-z196 fallback: no add-immediate-3-operand / load-relative-long
	lgr	%r2,%r0
	aghi	%r2,-MCOUNT_INSN_SIZE
	larl	%r4,function_trace_op
	lg	%r4,0(%r4)
	larl	%r1,ftrace_func
	lg	%r1,0(%r1)
#endif
	lgr	%r3,%r14			# %r3 = parent_ip
	la	%r5,STACK_PTREGS(%r15)		# %r5 = &pt_regs
	BASR_EX	%r14,%r1			# ftrace_func(ip, parent_ip, op, regs)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
	j	.Lftrace_graph_caller_end
	lmg	%r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)	# %r2/%r3 = saved r14/r15
	lg	%r4,(STACK_PTREGS_PSW+8)(%r15)		# %r4 = traced func ip
	brasl	%r14,prepare_ftrace_return
	stg	%r2,(STACK_PTREGS_GPRS+14*8)(%r15)	# possibly hooked ret addr
.Lftrace_graph_caller_end:
#endif
	lg	%r0,(STACK_PTREGS_PSW+8)(%r15)	# default branch target
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	# %r1 = orig_gpr2 if non-zero (callback installed an alternate
	# return target), otherwise the saved PSW address in %r0
	ltg	%r1,STACK_PTREGS_ORIG_GPR2(%r15)
	locgrz	%r1,%r0
#else
	lg	%r1,STACK_PTREGS_ORIG_GPR2(%r15)
	ltgr	%r1,%r1
	jnz	0f
	lgr	%r1,%r0
#endif
0:	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)	# restore %r2-%r15
	BR_EX	%r1
SYM_CODE_END(ftrace_common)
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
# Graph-tracer return trampoline: runs in place of the traced
# function's original return. Preserves the (possible) return value
# registers %r2-%r5, asks ftrace_return_to_handler() for the real
# return address (returned in %r2), and branches there.
SYM_FUNC_START(return_to_handler)
	stmg	%r2,%r5,32(%r15)	# save return value registers
	lgr	%r1,%r15
	aghi	%r15,-STACK_FRAME_OVERHEAD
	stg	%r1,__SF_BACKCHAIN(%r15)
	brasl	%r14,ftrace_return_to_handler
	aghi	%r15,STACK_FRAME_OVERHEAD
	lgr	%r14,%r2		# %r14 = original return address
	lmg	%r2,%r5,32(%r15)	# restore return value registers
	BR_EX	%r14
SYM_FUNC_END(return_to_handler)
|
|
|
|
#endif
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
|
|
|
# Hotpatch trampoline template (plain-br variant). On entry %r1 points
# into the per-patch-site data block: load %r0 and %r1 from it and
# branch to the loaded %r1. The _end label bounds the template so the
# ftrace C code can determine its size — presumably for copying it per
# patch site; confirm against the s390 ftrace.c users.
SYM_CODE_START(ftrace_shared_hotpatch_trampoline_br)
	lmg	%r0,%r1,2(%r1)
	br	%r1
SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_br_end, SYM_L_GLOBAL)
SYM_CODE_END(ftrace_shared_hotpatch_trampoline_br)
|
|
|
|
#ifdef CONFIG_EXPOLINE
# Hotpatch trampoline template, expoline-safe variant: instead of a
# plain br, execute the br at label 0 via exrl so the indirect branch
# is never taken in-line ("j ." only traps runaway execution; it is
# skipped by the exrl-executed branch).
SYM_CODE_START(ftrace_shared_hotpatch_trampoline_exrl)
	lmg	%r0,%r1,2(%r1)
	exrl	%r0,0f
	j	.
0:	br	%r1
SYM_INNER_LABEL(ftrace_shared_hotpatch_trampoline_exrl_end, SYM_L_GLOBAL)
SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
#endif /* CONFIG_EXPOLINE */
|
|
|
|
#ifdef CONFIG_RETHOOK
|
|
|
|
# Rethook return trampoline: executed in place of a hooked function's
# return. Builds a full pt_regs snapshot on the stack, hands it to
# arch_rethook_trampoline_callback(), then restores all GPRs and
# resumes via lpswe with the (possibly updated) PSW from pt_regs.
SYM_CODE_START(arch_rethook_trampoline)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	lay	%r15,-STACK_FRAME_SIZE(%r15)
	stmg	%r0,%r14,STACK_PTREGS_GPRS(%r15)	# pt_regs.gprs[0..14]

	# store original stack pointer in backchain and pt_regs
	lay	%r7,STACK_FRAME_SIZE(%r15)
	stg	%r7,__SF_BACKCHAIN(%r15)
	stg	%r7,STACK_PTREGS_GPRS+(15*8)(%r15)

	# store full psw
	epsw	%r2,%r3
	risbg	%r3,%r2,0,31,32			# %r3 = 64-bit PSW mask image
	stg	%r3,STACK_PTREGS_PSW(%r15)
	larl	%r1,arch_rethook_trampoline	# PSW address = this trampoline
	stg	%r1,STACK_PTREGS_PSW+8(%r15)

	lay	%r2,STACK_PTREGS(%r15)		# arg: &pt_regs
	brasl	%r14,arch_rethook_trampoline_callback

	# copy the 16-byte PSW (mask + address, possibly rewritten by the
	# callback) into the caller frame's empty slot, restore all GPRs
	# including %r15, then load that PSW to resume execution
	mvc	__SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15)
	lmg	%r0,%r15,STACK_PTREGS_GPRS(%r15)
	lpswe	__SF_EMPTY(%r15)
SYM_CODE_END(arch_rethook_trampoline)
|
|
|
|
#endif /* CONFIG_RETHOOK */
|