commit 9774b96bce
[ Upstream commit 87c482bdfa ]
In the kernel image linker scripts (vmlinux.lds.S), the .altinstructions
and __bug_table sections are 4- or 8-byte aligned because they hold 32-
and/or 64-bit values.
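For reference, the kernel image gets that alignment from macros in
include/asm-generic/vmlinux.lds.h. Condensed and slightly simplified (the
exact details vary by kernel version), the bug-table piece looks roughly
like this:

	/* force natural alignment before emitting the bug table
	 * records, which hold 32- and/or 64-bit values */
	. = ALIGN(8);
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {
		__start___bug_table = .;
		KEEP(*(__bug_table))
		__stop___bug_table = .;
	}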
Most architectures use altinstructions and BUG() or WARN() in modules as
well, but in the module linker script (module.lds.S) those sections are
currently missing. As a consequence, the linker stores their content
byte-aligned by default, which can then lead to unnecessary unaligned
memory accesses by the CPU when those tables are processed at runtime.
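To illustrate with made-up numbers: on a 64-bit machine, if a byte-aligned
__bug_table in a module happens to start, say, 2 bytes past an 8-byte
boundary, each 64-bit value inside its entries is then read from an
address that is not a multiple of 8, and every such read is an unaligned
access.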
Usually unaligned memory accesses go unnoticed, because either the
hardware (as on x86 CPUs) or in-kernel exception handlers (e.g. on
parisc or sparc) emulate and fix them up at runtime. Nevertheless, such
unaligned accesses introduce a performance penalty and can even crash
the kernel if there is a bug in the unaligned-access exception handlers
(which happened once to me on the parisc architecture and is how I
noticed the issue in the first place).
This patch fixes a non-critical issue and can be backported at any time.
It's trivial and shouldn't introduce any regression because it simply
tells the linker to use a different default (8-byte) alignment for those
sections.
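Concretely, the change gives each of those tables an explicit output
section description with an ALIGN(8) attribute in module.lds.S (full
listing below), along the lines of:

	__bug_table		0 : ALIGN(8) { KEEP(*(__bug_table)) }

Whether a built module actually picked this up can be double-checked by
looking at the section alignments that readelf -S reports for the
resulting .ko file.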
Signed-off-by: Helge Deller <deller@gmx.de>
Link: https://lore.kernel.org/all/Yr8%2Fgr8e8I7tVX4d@p100/
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
module.lds.S (70 lines, 1.7 KiB, ArmAsm):
/*
 * Common module linker script, always used when linking a module.
 * Archs are free to supply their own linker scripts. ld will
 * combine them automatically.
 */
#ifdef CONFIG_CFI_CLANG
# include <asm/page.h>
# define ALIGN_CFI		ALIGN(PAGE_SIZE)
# define SANITIZER_DISCARDS	*(.eh_frame)
#else
# define ALIGN_CFI
# define SANITIZER_DISCARDS
#endif

SECTIONS {
	/DISCARD/ : {
		*(.discard)
		*(.discard.*)
		SANITIZER_DISCARDS
	}

	__ksymtab		0 : { *(SORT(___ksymtab+*)) }
	__ksymtab_gpl		0 : { *(SORT(___ksymtab_gpl+*)) }
	__kcrctab		0 : { *(SORT(___kcrctab+*)) }
	__kcrctab_gpl		0 : { *(SORT(___kcrctab_gpl+*)) }

	.ctors			0 : ALIGN(8) { *(SORT(.ctors.*)) *(.ctors) }
	.init_array		0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }

	.altinstructions	0 : ALIGN(8) { KEEP(*(.altinstructions)) }
	__bug_table		0 : ALIGN(8) { KEEP(*(__bug_table)) }
	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }

	__patchable_function_entries : { *(__patchable_function_entries) }

#ifdef CONFIG_LTO_CLANG
	/*
	 * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
	 * -ffunction-sections, which increases the size of the final module.
	 * Merge the split sections in the final binary.
	 */
	.bss : {
		*(.bss .bss.[0-9a-zA-Z_]*)
		*(.bss..L*)
	}

	.data : {
		*(.data .data.[0-9a-zA-Z_]*)
		*(.data..L*)
	}

	.rodata : {
		*(.rodata .rodata.[0-9a-zA-Z_]*)
		*(.rodata..L*)
	}

	/*
	 * With CONFIG_CFI_CLANG, we assume __cfi_check is at the beginning
	 * of the .text section, and is aligned to PAGE_SIZE.
	 */
	.text : ALIGN_CFI {
		*(.text.__cfi_check)
		*(.text .text.[0-9a-zA-Z_]* .text..L.cfi*)
	}
#endif
}

/* bring in arch-specific sections */
#include <asm/module.lds.h>