mirror of https://mirrors.bfsu.edu.cn/git/linux.git
commit 753b323687
For inline assembly, we place exception fixups out-of-line in the
`.fixup` section such that these are out of the way of the fast path.

This has a few drawbacks:

* Since the fixup code is anonymous, backtraces will symbolize fixups
  as offsets from the nearest prior symbol, currently
  `__entry_tramp_text_end`. This is confusing, and painful to debug
  without access to the relevant vmlinux.

* Since the exception handler adjusts the PC to execute the fixup, and
  the fixup uses a direct branch back into the function it fixes,
  backtraces of fixups miss the original function. This is confusing,
  and violates requirements for RELIABLE_STACKTRACE (and therefore
  LIVEPATCH).

* Inline assembly and associated fixups are generated from templates,
  and we have many copies of logically identical fixups which only
  differ in which specific registers are written to and which address
  is branched to at the end of the fixup. This is potentially wasteful
  of I-cache resources, and makes it hard to add additional logic to
  fixups without significant bloat.

* In the case of load_unaligned_zeropad(), the logic in the fixup
  requires a temporary register that we must allocate even in the
  fast-path where it will not be used.

This patch addresses all four concerns for load_unaligned_zeropad()
fixups by adding a dedicated exception handler which performs the
fixup logic in exception context and subsequently returns to just
after the faulting instruction.

For the moment, the fixup logic is identical to the old assembly fixup
logic, but in future we could enhance this by taking the ESR and FAR
into account to constrain the faults we try to fix up, or to
specialize fixups for MTE tag check faults.

Other than backtracing, there should be no functional change as a
result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20211019160219.5202-13-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
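As a rough sketch of the mechanism described above, the handler-side fixup
might look like the code below. This is a hedged illustration, not the patch
itself (the real handler lives in arch/arm64/mm/extable.c): FIELD_GET() is
from <linux/bitfield.h>, pt_regs_read_reg()/pt_regs_write_reg() are the
arm64 helpers for accessing saved GPRs, and the EX_DATA_REG_DATA/ADDR fields
are defined in the header shown further down.

#include <linux/bitfield.h>
#include <asm/extable.h>
#include <asm/ptrace.h>

/*
 * Sketch only: emulate the old assembly fixup in exception context,
 * then resume at the fixup address recorded in the extable entry.
 */
static bool
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
				  struct pt_regs *regs)
{
	int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
	unsigned long data, addr, offset;

	addr = pt_regs_read_reg(regs, reg_addr);

	/* Redo the load from the aligned address containing the first byte. */
	offset = addr & 0x7UL;
	addr &= ~0x7UL;
	data = *(unsigned long *)addr;

#ifndef __AARCH64EB__
	data >>= 8 * offset;	/* LE: discard the bytes below the first */
#else
	data <<= 8 * offset;	/* BE: discard the bytes below the first */
#endif

	pt_regs_write_reg(regs, reg_data, data);

	/* `fixup` is a relative offset; continue just after the faulting load. */
	regs->pc = (unsigned long)&ex->fixup + ex->fixup;
	return true;
}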
96 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H

#define EX_TYPE_NONE			0
#define EX_TYPE_FIXUP			1
#define EX_TYPE_BPF			2
#define EX_TYPE_UACCESS_ERR_ZERO	3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD	4

#ifdef __ASSEMBLY__

#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
	.pushsection	__ex_table, "a";		\
	.align		2;				\
	.long		((insn) - .);			\
	.long		((fixup) - .);			\
	.short		(type);				\
	.short		(data);				\
	.popsection;

/*
 * Create an exception table entry for `insn`, which will branch to `fixup`
 * when an unhandled fault is taken.
 */
	.macro		_asm_extable, insn, fixup
	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
	.endm

/*
 * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
 * do nothing.
 */
	.macro		_cond_extable, insn, fixup
	.ifnc		\fixup,
	_asm_extable	\insn, \fixup
	.endif
	.endm
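
/*
 * Illustrative only (not part of this header): from an assembly file, a
 * faulting instruction and its fixup might be wired up roughly like so;
 * the labels and the fixup body here are hypothetical:
 *
 *	1:	ldr	x1, [x0]
 *		...
 *	2:	ret
 *
 *	9:	mov	x1, xzr		// fixup: fake a zero value
 *		b	2b
 *	_asm_extable	1b, 9b
 */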

#else /* __ASSEMBLY__ */

#include <linux/bits.h>
#include <linux/stringify.h>

#include <asm/gpr-num.h>

#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
	".pushsection	__ex_table, \"a\"\n"		\
	".align		2\n"				\
	".long		((" insn ") - .)\n"		\
	".long		((" fixup ") - .)\n"		\
	".short		(" type ")\n"			\
	".short		(" data ")\n"			\
	".popsection\n"

#define _ASM_EXTABLE(insn, fixup) \
	__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
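
/*
 * Illustrative only (not part of this header): a hypothetical helper
 * showing how _ASM_EXTABLE() pairs a faulting instruction with an
 * out-of-line fixup in inline assembly. Label 1 is the load that may
 * fault, label 3 the fixup, and label 2 the resume point.
 */
static inline unsigned long example_read_or_zero(unsigned long *addr)
{
	unsigned long val;

	asm volatile(
	"1:	ldr	%0, [%1]\n"
	"2:\n"
	"	.pushsection	.fixup, \"ax\"\n"
	"3:	mov	%0, #0\n"
	"	b	2b\n"
	"	.popsection\n"
	_ASM_EXTABLE(1b, 3b)
	: "=&r" (val)
	: "r" (addr)
	: "memory");

	return val;
}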

#define EX_DATA_REG_ERR_SHIFT	0
#define EX_DATA_REG_ERR		GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT	5
#define EX_DATA_REG_ZERO	GENMASK(9, 5)

#define EX_DATA_REG(reg, gpr)						\
	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"

#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
	__DEFINE_ASM_GPR_NUMS						\
	__ASM_EXTABLE_RAW(#insn, #fixup,				\
			  __stringify(EX_TYPE_UACCESS_ERR_ZERO),	\
			  "("						\
			    EX_DATA_REG(ERR, err) " | "			\
			    EX_DATA_REG(ZERO, zero)			\
			  ")")

#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
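
/*
 * Illustrative only (not part of this header): a hypothetical
 * __get_user()-style accessor. On a fault, the EX_TYPE_UACCESS_ERR_ZERO
 * handler is expected to write an error code into the register backing
 * `err` (the ERR field), zero the register backing `x` (the ZERO field),
 * and resume at label 2, so no assembly fixup body is needed.
 */
#define example_get_user(x, ptr, err)					\
	asm volatile(							\
	"1:	ldtr	%w1, [%2]\n"					\
	"2:\n"								\
	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
	: "+r" (err), "=&r" (x)						\
	: "r" (ptr))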

#define EX_DATA_REG_DATA_SHIFT	0
#define EX_DATA_REG_DATA	GENMASK(4, 0)
#define EX_DATA_REG_ADDR_SHIFT	5
#define EX_DATA_REG_ADDR	GENMASK(9, 5)

#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)	\
	__DEFINE_ASM_GPR_NUMS						\
	__ASM_EXTABLE_RAW(#insn, #fixup,				\
			  __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD),	\
			  "("						\
			    EX_DATA_REG(DATA, data) " | "		\
			    EX_DATA_REG(ADDR, addr)			\
			  ")")

#endif /* __ASSEMBLY__ */

#endif /* __ASM_ASM_EXTABLE_H */