objtool changes for v6.5:

- Build footprint & performance improvements:
 
     - Reduce memory usage with CONFIG_DEBUG_INFO=y
 
       In the worst case of an allyesconfig+CONFIG_DEBUG_INFO=y kernel, DWARF
       creates almost 200 million relocations, ballooning objtool's peak heap
       usage to 53GB.  These patches reduce that to 25GB.
 
       On a distro-type kernel with kernel IBT enabled, they reduce objtool's
       peak heap usage from 4.2GB to 2.8GB.
 
       These changes also improve the runtime significantly.
 
 - Debuggability improvements:
 
      - Add the unwind_debug command-line option, for more extensive unwinding
        debugging output.
     - Limit unreachable warnings to once per function
     - Add verbose option for disassembling affected functions
     - Include backtrace in verbose mode
     - Detect missing __noreturn annotations
     - Ignore exc_double_fault() __noreturn warnings
     - Remove superfluous global_noreturns entries
     - Move noreturn function list to separate file
     - Add __kunit_abort() to noreturns
 
 - Unwinder improvements:
 
     - Allow stack operations in UNWIND_HINT_UNDEFINED regions
     - drm/vmwgfx: Add unwind hints around RBP clobber
 
 - Cleanups:
 
     - Move the x86 entry thunk restore code into thunk functions
     - x86/unwind/orc: Use swap() instead of open coding it
     - Remove unnecessary/unused variables
 
 - Fixes for modern stack canary handling
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmSaxcoRHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1ht5w//f8mBoABct29pS4ib6pDwRZQDoG8fCA7M
 +KWjFD1AhX7RsJVEbM4uBUXdSWZD61xxIa8p8LO2jjzE5RyhM+EuNaisKujKqmfj
 uQTSnRhIRHMPqqVGK/gQxy1v4+3+12O32XFIJhAPYCp/dpbZJ2yKDsiHjapzZTDy
 BM+86hbIyHFmSl5uJcBFHEv6EGhoxwdrrrOxhpao1CqfAUi+uVgamHGwVqx+NtTY
 MvOmcy3/0ukHwDLON0MIMu9MSwvnXorD7+RSkYstwAM/k6ao/k78iJ31sOcynpRn
 ri0gmfygJsh2bxL4JUlY4ZeTs7PLWkj3i60deePc5u6EyV4JDJ2borUibs5oGoF6
 pN0AwbtubLHHhUI/v74B3E6K6ZGvLiEn9dsNTuXsJffD+qU2REb+WLhr4ut+E1Wi
 IKWrYh811yBLyOqFEW3XudZTiXSJlgi3eYiCxspEsKw2RIFFt2g6vYcwrIb0Hatw
 8R4/jCWk1nc6Wa3RQYsVnhkglAECSKQdDfS7p2e1hNUTjZuess4EEJjSLs8upIQ9
 D1bmuUxEzRxVwAZtXYNh0NKe7OtyOrqgsVTQuqxvWXq2CpC7Hqj8piVJWHdBWgHO
 0o2OQqjwSrzAtevpAIaYQv9zhPs1hV7CpBgzzqWGXrwJ3vM6YoSRLf0bg+5OkN8I
 O4U2xq2OVa8=
 =uNnc
 -----END PGP SIGNATURE-----

Merge tag 'objtool-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool updates from Ingo Molnar:
 "Build footprint & performance improvements:

   - Reduce memory usage with CONFIG_DEBUG_INFO=y

     In the worst case of an allyesconfig+CONFIG_DEBUG_INFO=y kernel,
     DWARF creates almost 200 million relocations, ballooning objtool's
     peak heap usage to 53GB. These patches reduce that to 25GB.

     On a distro-type kernel with kernel IBT enabled, they reduce
     objtool's peak heap usage from 4.2GB to 2.8GB.

     These changes also improve the runtime significantly.

  Debuggability improvements:

    - Add the unwind_debug command-line option, for more extensive unwinding
      debugging output
   - Limit unreachable warnings to once per function
   - Add verbose option for disassembling affected functions
   - Include backtrace in verbose mode
   - Detect missing __noreturn annotations
   - Ignore exc_double_fault() __noreturn warnings
   - Remove superfluous global_noreturns entries
   - Move noreturn function list to separate file
   - Add __kunit_abort() to noreturns

  Unwinder improvements:

   - Allow stack operations in UNWIND_HINT_UNDEFINED regions
   - drm/vmwgfx: Add unwind hints around RBP clobber

  Cleanups:

   - Move the x86 entry thunk restore code into thunk functions
   - x86/unwind/orc: Use swap() instead of open coding it
   - Remove unnecessary/unused variables

  Fixes for modern stack canary handling"

* tag 'objtool-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits)
  x86/orc: Make the is_callthunk() definition depend on CONFIG_BPF_JIT=y
  objtool: Skip reading DWARF section data
  objtool: Free insns when done
  objtool: Get rid of reloc->rel[a]
  objtool: Shrink elf hash nodes
  objtool: Shrink reloc->sym_reloc_entry
  objtool: Get rid of reloc->jump_table_start
  objtool: Get rid of reloc->addend
  objtool: Get rid of reloc->type
  objtool: Get rid of reloc->offset
  objtool: Get rid of reloc->idx
  objtool: Get rid of reloc->list
  objtool: Allocate relocs in advance for new rela sections
  objtool: Add for_each_reloc()
  objtool: Don't free memory in elf_close()
  objtool: Keep GElf_Rel[a] structs synced
  objtool: Add elf_create_section_pair()
  objtool: Add mark_sec_changed()
  objtool: Fix reloc_hash size
  objtool: Consolidate rel/rela handling
  ...
commit 6f612579be by Linus Torvalds, 2023-06-27 15:05:41 -07:00
28 changed files with 1038 additions and 770 deletions


@ -6598,6 +6598,12 @@
unknown_nmi_panic
[X86] Cause panic on unknown NMI.
unwind_debug [X86-64]
Enable unwinder debug output. This can be
useful for debugging certain unwinder error
conditions, including corrupt stacks and
bad/missing unwinder metadata.
usbcore.authorized_default=
[USB] Default USB device authorization:
(default -1 = authorized except for wireless USB,
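
An illustrative use of the unwind_debug parameter documented above; the image path and root device in this hypothetical boot line are placeholders, only the final parameter comes from this change:

    BOOT_IMAGE=/boot/vmlinuz root=/dev/sda2 ro console=ttyS0 unwind_debug

With the parameter set, the ORC unwinder dumps the raw stack contents the first time an unwind warning is emitted for the current task (see the unwinder hunk further down), which helps diagnose corrupt stacks or bad/missing unwinder metadata.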


@ -1605,6 +1605,7 @@ static void add_cpu_to_masks(int cpu)
}
/* Activate a secondary processor. */
__no_stack_protector
void start_secondary(void *unused)
{
unsigned int cpu = raw_smp_processor_id();


@ -26,17 +26,7 @@ SYM_FUNC_START(\name)
pushq %r11
call \func
jmp __thunk_restore
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
SYM_CODE_START_LOCAL(__thunk_restore)
popq %r11
popq %r10
popq %r9
@ -48,5 +38,11 @@ SYM_CODE_START_LOCAL(__thunk_restore)
popq %rdi
popq %rbp
RET
_ASM_NOKPROBE(__thunk_restore)
SYM_CODE_END(__thunk_restore)
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)


@ -113,7 +113,6 @@ extern void callthunks_patch_builtin_calls(void);
extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
extern bool is_callthunk(void *addr);
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
@ -124,10 +123,6 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
{
return dest;
}
static __always_inline bool is_callthunk(void *addr)
{
return false;
}
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
void *func)
{


@ -76,9 +76,18 @@
#else
#define UNWIND_HINT_UNDEFINED \
UNWIND_HINT(UNWIND_HINT_TYPE_UNDEFINED, 0, 0, 0)
#define UNWIND_HINT_FUNC \
UNWIND_HINT(UNWIND_HINT_TYPE_FUNC, ORC_REG_SP, 8, 0)
#define UNWIND_HINT_SAVE \
UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
#define UNWIND_HINT_RESTORE \
UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_UNWIND_HINTS_H */


@ -293,7 +293,8 @@ void *callthunks_translate_call_dest(void *dest)
return target ? : dest;
}
bool is_callthunk(void *addr)
#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;
void *tmpl = skl_call_thunk_template;
@ -306,7 +307,6 @@ bool is_callthunk(void *addr)
return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}
#ifdef CONFIG_BPF_JIT
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;


@ -16,8 +16,14 @@ ORC_HEADER;
#define orc_warn_current(args...) \
({ \
if (state->task == current && !state->error) \
static bool dumped_before; \
if (state->task == current && !state->error) { \
orc_warn(args); \
if (unwind_debug && !dumped_before) { \
dumped_before = true; \
unwind_dump(state); \
} \
} \
})
extern int __start_orc_unwind_ip[];
@ -26,8 +32,49 @@ extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];
static bool orc_init __ro_after_init;
static bool unwind_debug __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;
static int __init unwind_debug_cmdline(char *str)
{
unwind_debug = true;
return 0;
}
early_param("unwind_debug", unwind_debug_cmdline);
static void unwind_dump(struct unwind_state *state)
{
static bool dumped_before;
unsigned long word, *sp;
struct stack_info stack_info = {0};
unsigned long visit_mask = 0;
if (dumped_before)
return;
dumped_before = true;
printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
state->stack_info.type, state->stack_info.next_sp,
state->stack_mask, state->graph_idx);
for (sp = __builtin_frame_address(0); sp;
sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
break;
for (; sp < stack_info.end; sp++) {
word = READ_ONCE_NOCHECK(*sp);
printk_deferred("%0*lx: %0*lx (%pB)\n", BITS_PER_LONG/4,
(unsigned long)sp, BITS_PER_LONG/4,
word, (void *)word);
}
}
}
static inline unsigned long orc_ip(const int *ip)
{
return (unsigned long)ip + *ip;
@ -139,21 +186,6 @@ static struct orc_entry null_orc_entry = {
.type = ORC_TYPE_CALL
};
#ifdef CONFIG_CALL_THUNKS
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
if (!is_callthunk((void *)ip))
return NULL;
return &null_orc_entry;
}
#else
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
return NULL;
}
#endif
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
.type = ORC_TYPE_CALL,
@ -206,11 +238,7 @@ static struct orc_entry *orc_find(unsigned long ip)
if (orc)
return orc;
orc = orc_ftrace_find(ip);
if (orc)
return orc;
return orc_callthunk_find(ip);
return orc_ftrace_find(ip);
}
#ifdef CONFIG_MODULES
@ -222,7 +250,6 @@ static struct orc_entry *cur_orc_table = __start_orc_unwind;
static void orc_sort_swap(void *_a, void *_b, int size)
{
struct orc_entry *orc_a, *orc_b;
struct orc_entry orc_tmp;
int *a = _a, *b = _b, tmp;
int delta = _b - _a;
@ -234,9 +261,7 @@ static void orc_sort_swap(void *_a, void *_b, int size)
/* Swap the corresponding .orc_unwind entries: */
orc_a = cur_orc_table + (a - cur_orc_ip_table);
orc_b = cur_orc_table + (b - cur_orc_ip_table);
orc_tmp = *orc_a;
*orc_a = *orc_b;
*orc_b = orc_tmp;
swap(*orc_a, *orc_b);
}
static int orc_sort_cmp(const void *_a, const void *_b)


@ -105,10 +105,14 @@
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
asm volatile ( \
UNWIND_HINT_SAVE \
"push %%rbp;" \
UNWIND_HINT_UNDEFINED \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_OUT \
"pop %%rbp;" : \
"pop %%rbp;" \
UNWIND_HINT_RESTORE : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@ -130,10 +134,14 @@
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
asm volatile ("push %%rbp;" \
asm volatile ( \
UNWIND_HINT_SAVE \
"push %%rbp;" \
UNWIND_HINT_UNDEFINED \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_IN \
"pop %%rbp" : \
"pop %%rbp;" \
UNWIND_HINT_RESTORE : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \


@ -487,6 +487,7 @@ static void lkdtm_UNSET_SMEP(void)
* the cr4 writing instruction.
*/
insn = (unsigned char *)native_write_cr4;
OPTIMIZER_HIDE_VAR(insn);
for (i = 0; i < MOV_CR4_DEPTH; i++) {
/* mov %rdi, %cr4 */
if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)


@ -255,6 +255,18 @@
*/
#define __noreturn __attribute__((__noreturn__))
/*
* Optional: only supported since GCC >= 11.1, clang >= 7.0.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fstack_005fprotector-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#no-stack-protector-safebuffers
*/
#if __has_attribute(__no_stack_protector__)
# define __no_stack_protector __attribute__((__no_stack_protector__))
#else
# define __no_stack_protector
#endif
/*
* Optional: not supported by gcc.
*
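
The __no_stack_protector attribute added above is applied to early boot code in this series (start_kernel() and start_secondary()); a minimal sketch of the pattern, using a hypothetical function name:

    /*
     * Code that runs before boot_init_stack_canary() must not itself be
     * built with a stack protector, since the canary value is not set up
     * yet.  On GCC >= 11.1 / clang >= 7.0 this is expressed per function:
     */
    static void __no_stack_protector early_boot_setup(void)
    {
            /* ... runs before the stack canary is initialized ... */
    }

On older compilers the attribute expands to nothing, which is why the start_kernel() hunk below keeps the prevent_tail_call_optimization() fallback behind !__has_attribute(__no_stack_protector__).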


@ -873,7 +873,8 @@ static void __init print_unknown_bootoptions(void)
memblock_free(unknown_options, len);
}
asmlinkage __visible void __init __no_sanitize_address __noreturn start_kernel(void)
asmlinkage __visible __init __no_sanitize_address __noreturn __no_stack_protector
void start_kernel(void)
{
char *command_line;
char *after_dashes;
@ -1073,7 +1074,13 @@ asmlinkage __visible void __init __no_sanitize_address __noreturn start_kernel(v
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
/*
* Avoid stack canaries in callers of boot_init_stack_canary for gcc-10
* and older.
*/
#if !__has_attribute(__no_stack_protector__)
prevent_tail_call_optimization();
#endif
}
/* Call all constructor functions linked into the kernel. */


@ -6,10 +6,6 @@
#include <stdbool.h>
#include <stdint.h>
#ifndef NORETURN
#define NORETURN __attribute__((__noreturn__))
#endif
enum parse_opt_type {
/* special types */
OPTION_END,
@ -183,9 +179,9 @@ extern int parse_options_subcommand(int argc, const char **argv,
const char *const subcommands[],
const char *usagestr[], int flags);
extern NORETURN void usage_with_options(const char * const *usagestr,
extern __noreturn void usage_with_options(const char * const *usagestr,
const struct option *options);
extern NORETURN __attribute__((format(printf,3,4)))
extern __noreturn __attribute__((format(printf,3,4)))
void usage_with_options_msg(const char * const *usagestr,
const struct option *options,
const char *fmt, ...);


@ -5,8 +5,7 @@
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#define NORETURN __attribute__((__noreturn__))
#include <linux/compiler.h>
static inline void report(const char *prefix, const char *err, va_list params)
{
@ -15,7 +14,7 @@ static inline void report(const char *prefix, const char *err, va_list params)
fprintf(stderr, " %s%s\n", prefix, msg);
}
static NORETURN inline void die(const char *err, ...)
static __noreturn inline void die(const char *err, ...)
{
va_list params;


@ -244,6 +244,11 @@ To achieve the validation, objtool enforces the following rules:
Objtool warnings
----------------
NOTE: When requesting help with an objtool warning, please recreate with
OBJTOOL_VERBOSE=1 (e.g., "make OBJTOOL_VERBOSE=1") and send the full
output, including any disassembly or backtrace below the warning, to the
objtool maintainers.
For asm files, if you're getting an error which doesn't make sense,
first make sure that the affected code follows the above rules.
@ -298,6 +303,11 @@ the objtool maintainers.
If it's not actually in a callable function (e.g. kernel entry code),
change ENDPROC to END.
3. file.o: warning: objtool: foo+0x48c: bar() is missing a __noreturn annotation
The call from foo() to bar() doesn't return, but bar() is missing the
__noreturn annotation. NOTE: In addition to annotating the function
with __noreturn, please also add it to tools/objtool/noreturns.h.
4. file.o: warning: objtool: func(): can't find starting instruction
or
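
A minimal sketch of the workflow described in warning 3 above, using a hypothetical function name (__noreturn is the kernel's wrapper for __attribute__((__noreturn__)), as shown in an earlier hunk):

    /* Hypothetical helper that never returns to its caller. */
    void __noreturn my_halt_cpu(void)
    {
            for (;;)
                    cpu_relax();    /* cpu_relax() is illustrative; control never returns */
    }

plus a matching entry, kept in alphabetical order, in tools/objtool/noreturns.h:

    NORETURN(my_halt_cpu)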


@ -1,10 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_ELF
#define _OBJTOOL_ARCH_ELF
#define R_NONE R_PPC_NONE
#define R_ABS64 R_PPC64_ADDR64
#define R_ABS32 R_PPC_ADDR32
#define R_DATA32 R_PPC_REL32
#define R_DATA64 R_PPC64_REL64
#define R_TEXT32 R_PPC_REL32
#define R_TEXT64 R_PPC64_REL32
#endif /* _OBJTOOL_ARCH_ELF */


@ -84,7 +84,7 @@ bool arch_pc_relative_reloc(struct reloc *reloc)
* All relocation types where P (the address of the target)
* is included in the computation.
*/
switch (reloc->type) {
switch (reloc_type(reloc)) {
case R_X86_64_PC8:
case R_X86_64_PC16:
case R_X86_64_PC32:
@ -623,11 +623,11 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (!immr || strcmp(immr->sym->name, "pv_ops"))
break;
idx = (immr->addend + 8) / sizeof(void *);
idx = (reloc_addend(immr) + 8) / sizeof(void *);
func = disp->sym;
if (disp->sym->type == STT_SECTION)
func = find_symbol_by_offset(disp->sym->sec, disp->addend);
func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
if (!func) {
WARN("no func for pv_ops[]");
return -1;


@ -1,8 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _OBJTOOL_ARCH_ELF
#define _OBJTOOL_ARCH_ELF
#define R_NONE R_X86_64_NONE
#define R_ABS64 R_X86_64_64
#define R_ABS32 R_X86_64_32
#define R_ABS64 R_X86_64_64
#define R_DATA32 R_X86_64_PC32
#define R_DATA64 R_X86_64_PC32
#define R_TEXT32 R_X86_64_PC32
#define R_TEXT64 R_X86_64_PC32
#endif /* _OBJTOOL_ARCH_ELF */


@ -99,10 +99,10 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
!text_reloc->sym->sec->rodata)
return NULL;
table_offset = text_reloc->addend;
table_offset = reloc_addend(text_reloc);
table_sec = text_reloc->sym->sec;
if (text_reloc->type == R_X86_64_PC32)
if (reloc_type(text_reloc) == R_X86_64_PC32)
table_offset += 4;
/*
@ -132,7 +132,7 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
if (text_reloc->type == R_X86_64_PC32)
if (reloc_type(text_reloc) == R_X86_64_PC32)
file->ignore_unreachables = true;
return rodata_reloc;


@ -93,6 +93,7 @@ static const struct option check_options[] = {
OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"),
OPT_END(),
};
@ -118,6 +119,10 @@ int cmd_parse_options(int argc, const char **argv, const char * const usage[])
parse_options(envc, envv, check_options, env_usage, 0);
}
env = getenv("OBJTOOL_VERBOSE");
if (env && !strcmp(env, "1"))
opts.verbose = true;
argc = parse_options(argc, argv, check_options, usage, 0);
if (argc != 1)
usage_with_options(usage, check_options);

[File diff suppressed because it is too large]


@ -32,16 +32,52 @@ static inline u32 str_hash(const char *str)
#define __elf_table(name) (elf->name##_hash)
#define __elf_bits(name) (elf->name##_bits)
#define __elf_table_entry(name, key) \
__elf_table(name)[hash_min(key, __elf_bits(name))]
#define elf_hash_add(name, node, key) \
hlist_add_head(node, &__elf_table(name)[hash_min(key, __elf_bits(name))])
({ \
struct elf_hash_node *__node = node; \
__node->next = __elf_table_entry(name, key); \
__elf_table_entry(name, key) = __node; \
})
static inline void __elf_hash_del(struct elf_hash_node *node,
struct elf_hash_node **head)
{
struct elf_hash_node *cur, *prev;
if (node == *head) {
*head = node->next;
return;
}
for (prev = NULL, cur = *head; cur; prev = cur, cur = cur->next) {
if (cur == node) {
prev->next = cur->next;
break;
}
}
}
#define elf_hash_del(name, node, key) \
__elf_hash_del(node, &__elf_table_entry(name, key))
#define elf_list_entry(ptr, type, member) \
({ \
typeof(ptr) __ptr = (ptr); \
__ptr ? container_of(__ptr, type, member) : NULL; \
})
#define elf_hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &__elf_table(name)[hash_min(key, __elf_bits(name))], member)
for (obj = elf_list_entry(__elf_table_entry(name, key), typeof(*obj), member); \
obj; \
obj = elf_list_entry(obj->member.next, typeof(*(obj)), member))
#define elf_alloc_hash(name, size) \
({ \
__elf_bits(name) = max(10, ilog2(size)); \
__elf_table(name) = mmap(NULL, sizeof(struct hlist_head) << __elf_bits(name), \
__elf_table(name) = mmap(NULL, sizeof(struct elf_hash_node *) << __elf_bits(name), \
PROT_READ|PROT_WRITE, \
MAP_PRIVATE|MAP_ANON, -1, 0); \
if (__elf_table(name) == (void *)-1L) { \
@ -233,21 +269,22 @@ struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *se
unsigned long offset, unsigned int len)
{
struct reloc *reloc, *r = NULL;
struct section *rsec;
unsigned long o;
if (!sec->reloc)
rsec = sec->rsec;
if (!rsec)
return NULL;
sec = sec->reloc;
for_offset_range(o, offset, offset + len) {
elf_hash_for_each_possible(reloc, reloc, hash,
sec_offset_hash(sec, o)) {
if (reloc->sec != sec)
sec_offset_hash(rsec, o)) {
if (reloc->sec != rsec)
continue;
if (reloc->offset >= offset && reloc->offset < offset + len) {
if (!r || reloc->offset < r->offset)
if (reloc_offset(reloc) >= offset &&
reloc_offset(reloc) < offset + len) {
if (!r || reloc_offset(reloc) < reloc_offset(r))
r = reloc;
}
}
@ -263,6 +300,11 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
return find_reloc_by_dest_range(elf, sec, offset, 1);
}
static bool is_dwarf_section(struct section *sec)
{
return !strncmp(sec->name, ".debug_", 7);
}
static int read_sections(struct elf *elf)
{
Elf_Scn *s = NULL;
@ -293,7 +335,6 @@ static int read_sections(struct elf *elf)
sec = &elf->section_data[i];
INIT_LIST_HEAD(&sec->symbol_list);
INIT_LIST_HEAD(&sec->reloc_list);
s = elf_getscn(elf->elf, i);
if (!s) {
@ -314,7 +355,7 @@ static int read_sections(struct elf *elf)
return -1;
}
if (sec->sh.sh_size != 0) {
if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) {
sec->data = elf_getdata(s, NULL);
if (!sec->data) {
WARN_ELF("elf_getdata");
@ -328,12 +369,12 @@ static int read_sections(struct elf *elf)
}
}
if (sec->sh.sh_flags & SHF_EXECINSTR)
elf->text_size += sec->sh.sh_size;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
if (is_reloc_sec(sec))
elf->num_relocs += sec_num_entries(sec);
}
if (opts.stats) {
@ -356,7 +397,6 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
struct rb_node *pnode;
struct symbol *iter;
INIT_LIST_HEAD(&sym->reloc_list);
INIT_LIST_HEAD(&sym->pv_target);
sym->alias = sym;
@ -407,7 +447,7 @@ static int read_symbols(struct elf *elf)
if (symtab_shndx)
shndx_data = symtab_shndx->data;
symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
symbols_nr = sec_num_entries(symtab);
} else {
/*
* A missing symbol table is actually possible if it's an empty
@ -533,54 +573,19 @@ err:
return -1;
}
static struct section *elf_create_reloc_section(struct elf *elf,
struct section *base,
int reltype);
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
unsigned int type, struct symbol *sym, s64 addend)
/*
* @sym's idx has changed. Update the relocs which reference it.
*/
static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym)
{
struct reloc *reloc;
if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA))
return -1;
reloc = malloc(sizeof(*reloc));
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
reloc->sec = sec->reloc;
reloc->offset = offset;
reloc->type = type;
reloc->sym = sym;
reloc->addend = addend;
list_add_tail(&reloc->sym_reloc_entry, &sym->reloc_list);
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
sec->reloc->changed = true;
for (reloc = sym->relocs; reloc; reloc = reloc->sym_next_reloc)
set_reloc_sym(elf, reloc, reloc->sym->idx);
return 0;
}
/*
* Ensure that any reloc section containing references to @sym is marked
* changed such that it will get re-generated in elf_rebuild_reloc_sections()
* with the new symbol index.
*/
static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
{
struct reloc *reloc;
list_for_each_entry(reloc, &sym->reloc_list, sym_reloc_entry)
reloc->sec->changed = true;
}
/*
* The libelf API is terrible; gelf_update_sym*() takes a data block relative
* index value, *NOT* the symbol index. As such, iterate the data blocks and
@ -655,7 +660,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
symtab_data->d_align = 1;
symtab_data->d_type = ELF_T_SYM;
symtab->changed = true;
mark_sec_changed(elf, symtab, true);
symtab->truncate = true;
if (t) {
@ -670,7 +675,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
shndx_data->d_align = sizeof(Elf32_Word);
shndx_data->d_type = ELF_T_WORD;
symtab_shndx->changed = true;
mark_sec_changed(elf, symtab_shndx, true);
symtab_shndx->truncate = true;
}
@ -734,7 +739,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
return NULL;
}
new_idx = symtab->sh.sh_size / symtab->sh.sh_entsize;
new_idx = sec_num_entries(symtab);
if (GELF_ST_BIND(sym->sym.st_info) != STB_LOCAL)
goto non_local;
@ -746,18 +751,19 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
first_non_local = symtab->sh.sh_info;
old = find_symbol_by_index(elf, first_non_local);
if (old) {
elf_hash_del(symbol, &old->hash, old->idx);
elf_hash_add(symbol, &old->hash, new_idx);
old->idx = new_idx;
hlist_del(&old->hash);
elf_hash_add(symbol, &old->hash, old->idx);
elf_dirty_reloc_sym(elf, old);
if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
WARN("elf_update_symbol move");
return NULL;
}
if (elf_update_sym_relocs(elf, old))
return NULL;
new_idx = first_non_local;
}
@ -774,11 +780,11 @@ non_local:
}
symtab->sh.sh_size += symtab->sh.sh_entsize;
symtab->changed = true;
mark_sec_changed(elf, symtab, true);
if (symtab_shndx) {
symtab_shndx->sh.sh_size += sizeof(Elf32_Word);
symtab_shndx->changed = true;
mark_sec_changed(elf, symtab_shndx, true);
}
return sym;
@ -841,13 +847,57 @@ elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
return sym;
}
int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int type,
struct section *insn_sec, unsigned long insn_off)
static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
unsigned int reloc_idx,
unsigned long offset, struct symbol *sym,
s64 addend, unsigned int type)
{
struct reloc *reloc, empty = { 0 };
if (reloc_idx >= sec_num_entries(rsec)) {
WARN("%s: bad reloc_idx %u for %s with %d relocs",
__func__, reloc_idx, rsec->name, sec_num_entries(rsec));
return NULL;
}
reloc = &rsec->relocs[reloc_idx];
if (memcmp(reloc, &empty, sizeof(empty))) {
WARN("%s: %s: reloc %d already initialized!",
__func__, rsec->name, reloc_idx);
return NULL;
}
reloc->sec = rsec;
reloc->sym = sym;
set_reloc_offset(elf, reloc, offset);
set_reloc_sym(elf, reloc, sym->idx);
set_reloc_type(elf, reloc, type);
set_reloc_addend(elf, reloc, addend);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
reloc->sym_next_reloc = sym->relocs;
sym->relocs = reloc;
return reloc;
}
struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct section *insn_sec,
unsigned long insn_off)
{
struct symbol *sym = insn_sec->sym;
int addend = insn_off;
if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) {
WARN("bad call to %s() for data symbol %s",
__func__, sym->name);
return NULL;
}
if (!sym) {
/*
* Due to how weak functions work, we must use section based
@ -857,108 +907,86 @@ int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
*/
sym = elf_create_section_symbol(elf, insn_sec);
if (!sym)
return -1;
return NULL;
insn_sec->sym = sym;
}
return elf_add_reloc(elf, sec, offset, type, sym, addend);
return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend,
elf_text_rela_type(elf));
}
static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct symbol *sym,
s64 addend)
{
if (!gelf_getrel(sec->data, i, &reloc->rel)) {
WARN_ELF("gelf_getrel");
return -1;
}
reloc->type = GELF_R_TYPE(reloc->rel.r_info);
reloc->addend = 0;
reloc->offset = reloc->rel.r_offset;
*symndx = GELF_R_SYM(reloc->rel.r_info);
return 0;
if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) {
WARN("bad call to %s() for text symbol %s",
__func__, sym->name);
return NULL;
}
static int read_rela_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
{
if (!gelf_getrela(sec->data, i, &reloc->rela)) {
WARN_ELF("gelf_getrela");
return -1;
}
reloc->type = GELF_R_TYPE(reloc->rela.r_info);
reloc->addend = reloc->rela.r_addend;
reloc->offset = reloc->rela.r_offset;
*symndx = GELF_R_SYM(reloc->rela.r_info);
return 0;
return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend,
elf_data_rela_type(elf));
}
static int read_relocs(struct elf *elf)
{
unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;
struct section *sec;
unsigned long nr_reloc, max_reloc = 0;
struct section *rsec;
struct reloc *reloc;
unsigned int symndx;
struct symbol *sym;
int i;
if (!elf_alloc_hash(reloc, elf->text_size / 16))
if (!elf_alloc_hash(reloc, elf->num_relocs))
return -1;
list_for_each_entry(sec, &elf->sections, list) {
if ((sec->sh.sh_type != SHT_RELA) &&
(sec->sh.sh_type != SHT_REL))
list_for_each_entry(rsec, &elf->sections, list) {
if (!is_reloc_sec(rsec))
continue;
sec->base = find_section_by_index(elf, sec->sh.sh_info);
if (!sec->base) {
rsec->base = find_section_by_index(elf, rsec->sh.sh_info);
if (!rsec->base) {
WARN("can't find base section for reloc section %s",
sec->name);
rsec->name);
return -1;
}
sec->base->reloc = sec;
rsec->base->rsec = rsec;
nr_reloc = 0;
sec->reloc_data = calloc(sec->sh.sh_size / sec->sh.sh_entsize, sizeof(*reloc));
if (!sec->reloc_data) {
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc));
if (!rsec->relocs) {
perror("calloc");
return -1;
}
for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) {
reloc = &sec->reloc_data[i];
switch (sec->sh.sh_type) {
case SHT_REL:
if (read_rel_reloc(sec, i, reloc, &symndx))
return -1;
break;
case SHT_RELA:
if (read_rela_reloc(sec, i, reloc, &symndx))
return -1;
break;
default: return -1;
}
for (i = 0; i < sec_num_entries(rsec); i++) {
reloc = &rsec->relocs[i];
reloc->sec = sec;
reloc->idx = i;
reloc->sec = rsec;
symndx = reloc_sym(reloc);
reloc->sym = sym = find_symbol_by_index(elf, symndx);
if (!reloc->sym) {
WARN("can't find reloc entry symbol %d for %s",
symndx, sec->name);
symndx, rsec->name);
return -1;
}
list_add_tail(&reloc->sym_reloc_entry, &sym->reloc_list);
list_add_tail(&reloc->list, &sec->reloc_list);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
reloc->sym_next_reloc = sym->relocs;
sym->relocs = reloc;
nr_reloc++;
}
max_reloc = max(max_reloc, nr_reloc);
tot_reloc += nr_reloc;
}
if (opts.stats) {
printf("max_reloc: %lu\n", max_reloc);
printf("tot_reloc: %lu\n", tot_reloc);
printf("num_relocs: %lu\n", elf->num_relocs);
printf("reloc_bits: %d\n", elf->reloc_bits);
}
@ -1053,13 +1081,14 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
len = strtab->sh.sh_size;
strtab->sh.sh_size += data->d_size;
strtab->changed = true;
mark_sec_changed(elf, strtab, true);
return len;
}
struct section *elf_create_section(struct elf *elf, const char *name,
unsigned int sh_flags, size_t entsize, int nr)
size_t entsize, unsigned int nr)
{
struct section *sec, *shstrtab;
size_t size = entsize * nr;
@ -1073,7 +1102,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
memset(sec, 0, sizeof(*sec));
INIT_LIST_HEAD(&sec->symbol_list);
INIT_LIST_HEAD(&sec->reloc_list);
s = elf_newscn(elf->elf);
if (!s) {
@ -1088,7 +1116,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
}
sec->idx = elf_ndxscn(s);
sec->changed = true;
sec->data = elf_newdata(s);
if (!sec->data) {
@ -1117,7 +1144,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_entsize = entsize;
sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC | sh_flags;
sec->sh.sh_flags = SHF_ALLOC;
/* Add section name to .shstrtab (or .strtab for Clang) */
shstrtab = find_section_by_name(elf, ".shstrtab");
@ -1135,158 +1162,66 @@ struct section *elf_create_section(struct elf *elf, const char *name,
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
elf->changed = true;
mark_sec_changed(elf, sec, true);
return sec;
}
static struct section *elf_create_rel_reloc_section(struct elf *elf, struct section *base)
static struct section *elf_create_rela_section(struct elf *elf,
struct section *sec,
unsigned int reloc_nr)
{
char *relocname;
struct section *sec;
struct section *rsec;
char *rsec_name;
relocname = malloc(strlen(base->name) + strlen(".rel") + 1);
if (!relocname) {
rsec_name = malloc(strlen(sec->name) + strlen(".rela") + 1);
if (!rsec_name) {
perror("malloc");
return NULL;
}
strcpy(relocname, ".rel");
strcat(relocname, base->name);
strcpy(rsec_name, ".rela");
strcat(rsec_name, sec->name);
sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rel), 0);
free(relocname);
rsec = elf_create_section(elf, rsec_name, elf_rela_size(elf), reloc_nr);
free(rsec_name);
if (!rsec)
return NULL;
rsec->data->d_type = ELF_T_RELA;
rsec->sh.sh_type = SHT_RELA;
rsec->sh.sh_addralign = elf_addr_size(elf);
rsec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
rsec->sh.sh_info = sec->idx;
rsec->sh.sh_flags = SHF_INFO_LINK;
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc));
if (!rsec->relocs) {
perror("calloc");
return NULL;
}
sec->rsec = rsec;
rsec->base = sec;
return rsec;
}
struct section *elf_create_section_pair(struct elf *elf, const char *name,
size_t entsize, unsigned int nr,
unsigned int reloc_nr)
{
struct section *sec;
sec = elf_create_section(elf, name, entsize, nr);
if (!sec)
return NULL;
base->reloc = sec;
sec->base = base;
sec->sh.sh_type = SHT_REL;
sec->sh.sh_addralign = 8;
sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
sec->sh.sh_info = base->idx;
sec->sh.sh_flags = SHF_INFO_LINK;
return sec;
}
static struct section *elf_create_rela_reloc_section(struct elf *elf, struct section *base)
{
char *relocname;
struct section *sec;
int addrsize = elf_class_addrsize(elf);
relocname = malloc(strlen(base->name) + strlen(".rela") + 1);
if (!relocname) {
perror("malloc");
return NULL;
}
strcpy(relocname, ".rela");
strcat(relocname, base->name);
if (addrsize == sizeof(u32))
sec = elf_create_section(elf, relocname, 0, sizeof(Elf32_Rela), 0);
else
sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
free(relocname);
if (!sec)
if (!elf_create_rela_section(elf, sec, reloc_nr))
return NULL;
base->reloc = sec;
sec->base = base;
sec->sh.sh_type = SHT_RELA;
sec->sh.sh_addralign = addrsize;
sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
sec->sh.sh_info = base->idx;
sec->sh.sh_flags = SHF_INFO_LINK;
return sec;
}
static struct section *elf_create_reloc_section(struct elf *elf,
struct section *base,
int reltype)
{
switch (reltype) {
case SHT_REL: return elf_create_rel_reloc_section(elf, base);
case SHT_RELA: return elf_create_rela_reloc_section(elf, base);
default: return NULL;
}
}
static int elf_rebuild_rel_reloc_section(struct section *sec)
{
struct reloc *reloc;
int idx = 0;
void *buf;
/* Allocate a buffer for relocations */
buf = malloc(sec->sh.sh_size);
if (!buf) {
perror("malloc");
return -1;
}
sec->data->d_buf = buf;
sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_REL;
idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rel.r_offset = reloc->offset;
reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
WARN_ELF("gelf_update_rel");
return -1;
}
idx++;
}
return 0;
}
static int elf_rebuild_rela_reloc_section(struct section *sec)
{
struct reloc *reloc;
int idx = 0;
void *buf;
/* Allocate a buffer for relocations with addends */
buf = malloc(sec->sh.sh_size);
if (!buf) {
perror("malloc");
return -1;
}
sec->data->d_buf = buf;
sec->data->d_size = sec->sh.sh_size;
sec->data->d_type = ELF_T_RELA;
idx = 0;
list_for_each_entry(reloc, &sec->reloc_list, list) {
reloc->rela.r_offset = reloc->offset;
reloc->rela.r_addend = reloc->addend;
reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
WARN_ELF("gelf_update_rela");
return -1;
}
idx++;
}
return 0;
}
static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
{
switch (sec->sh.sh_type) {
case SHT_REL: return elf_rebuild_rel_reloc_section(sec);
case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
default: return -1;
}
}
int elf_write_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len,
const char *insn)
@ -1299,37 +1234,8 @@ int elf_write_insn(struct elf *elf, struct section *sec,
}
memcpy(data->d_buf + offset, insn, len);
elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
elf->changed = true;
return 0;
}
int elf_write_reloc(struct elf *elf, struct reloc *reloc)
{
struct section *sec = reloc->sec;
if (sec->sh.sh_type == SHT_REL) {
reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
reloc->rel.r_offset = reloc->offset;
if (!gelf_update_rel(sec->data, reloc->idx, &reloc->rel)) {
WARN_ELF("gelf_update_rel");
return -1;
}
} else {
reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
reloc->rela.r_addend = reloc->addend;
reloc->rela.r_offset = reloc->offset;
if (!gelf_update_rela(sec->data, reloc->idx, &reloc->rela)) {
WARN_ELF("gelf_update_rela");
return -1;
}
}
elf->changed = true;
mark_sec_changed(elf, sec, true);
return 0;
}
@ -1401,25 +1307,20 @@ int elf_write(struct elf *elf)
if (sec->truncate)
elf_truncate_section(elf, sec);
if (sec->changed) {
if (sec_changed(sec)) {
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
WARN_ELF("elf_getscn");
return -1;
}
/* Note this also flags the section dirty */
if (!gelf_update_shdr(s, &sec->sh)) {
WARN_ELF("gelf_update_shdr");
return -1;
}
if (sec->base &&
elf_rebuild_reloc_section(elf, sec)) {
WARN("elf_rebuild_reloc_section");
return -1;
}
sec->changed = false;
elf->changed = true;
mark_sec_changed(elf, sec, false);
}
}
@ -1439,30 +1340,14 @@ int elf_write(struct elf *elf)
void elf_close(struct elf *elf)
{
struct section *sec, *tmpsec;
struct symbol *sym, *tmpsym;
struct reloc *reloc, *tmpreloc;
if (elf->elf)
elf_end(elf->elf);
if (elf->fd > 0)
close(elf->fd);
list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
list_del(&sym->list);
hash_del(&sym->hash);
}
list_for_each_entry_safe(reloc, tmpreloc, &sec->reloc_list, list) {
list_del(&reloc->list);
hash_del(&reloc->hash);
}
list_del(&sec->list);
free(sec->reloc_data);
}
free(elf->symbol_data);
free(elf->section_data);
free(elf);
/*
* NOTE: All remaining allocations are leaked on purpose. Objtool is
* about to exit anyway.
*/
}


@ -37,6 +37,7 @@ struct opts {
bool no_unreachable;
bool sec_address;
bool stats;
bool verbose;
};
extern struct opts opts;


@ -36,6 +36,7 @@ struct cfi_state {
bool drap;
bool signal;
bool end;
bool force_undefined;
};
#endif /* _OBJTOOL_CFI_H */


@ -12,6 +12,7 @@
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <linux/jhash.h>
#include <arch/elf.h>
#ifdef LIBELF_USE_DEPRECATED
# define elf_getshdrnum elf_getshnum
@ -25,28 +26,31 @@
#define ELF_C_READ_MMAP ELF_C_READ
#endif
struct elf_hash_node {
struct elf_hash_node *next;
};
struct section {
struct list_head list;
struct hlist_node hash;
struct hlist_node name_hash;
struct elf_hash_node hash;
struct elf_hash_node name_hash;
GElf_Shdr sh;
struct rb_root_cached symbol_tree;
struct list_head symbol_list;
struct list_head reloc_list;
struct section *base, *reloc;
struct section *base, *rsec;
struct symbol *sym;
Elf_Data *data;
char *name;
int idx;
bool changed, text, rodata, noinstr, init, truncate;
struct reloc *reloc_data;
bool _changed, text, rodata, noinstr, init, truncate;
struct reloc *relocs;
};
struct symbol {
struct list_head list;
struct rb_node node;
struct hlist_node hash;
struct hlist_node name_hash;
struct elf_hash_node hash;
struct elf_hash_node name_hash;
GElf_Sym sym;
struct section *sec;
char *name;
@ -61,37 +65,27 @@ struct symbol {
u8 return_thunk : 1;
u8 fentry : 1;
u8 profiling_func : 1;
u8 warned : 1;
struct list_head pv_target;
struct list_head reloc_list;
struct reloc *relocs;
};
struct reloc {
struct list_head list;
struct hlist_node hash;
union {
GElf_Rela rela;
GElf_Rel rel;
};
struct elf_hash_node hash;
struct section *sec;
struct symbol *sym;
struct list_head sym_reloc_entry;
unsigned long offset;
unsigned int type;
s64 addend;
int idx;
bool jump_table_start;
struct reloc *sym_next_reloc;
};
#define ELF_HASH_BITS 20
struct elf {
Elf *elf;
GElf_Ehdr ehdr;
int fd;
bool changed;
char *name;
unsigned int text_size, num_files;
unsigned int num_files;
struct list_head sections;
unsigned long num_relocs;
int symbol_bits;
int symbol_name_bits;
@ -99,16 +93,230 @@ struct elf {
int section_name_bits;
int reloc_bits;
struct hlist_head *symbol_hash;
struct hlist_head *symbol_name_hash;
struct hlist_head *section_hash;
struct hlist_head *section_name_hash;
struct hlist_head *reloc_hash;
struct elf_hash_node **symbol_hash;
struct elf_hash_node **symbol_name_hash;
struct elf_hash_node **section_hash;
struct elf_hash_node **section_name_hash;
struct elf_hash_node **reloc_hash;
struct section *section_data;
struct symbol *symbol_data;
};
struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name,
size_t entsize, unsigned int nr);
struct section *elf_create_section_pair(struct elf *elf, const char *name,
size_t entsize, unsigned int nr,
unsigned int reloc_nr);
struct symbol *elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size);
struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct section *insn_sec,
unsigned long insn_off);
struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
unsigned long offset,
unsigned int reloc_idx,
struct symbol *sym,
s64 addend);
int elf_write_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len,
const char *insn);
int elf_write(struct elf *elf);
void elf_close(struct elf *elf);
struct section *find_section_by_name(const struct elf *elf, const char *name);
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_name(const struct elf *elf, const char *name);
struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset);
int find_symbol_hole_containing(const struct section *sec, unsigned long offset);
struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset);
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len);
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
/*
* Try to see if it's a whole archive (vmlinux.o or module).
*
* Note this will miss the case where a module only has one source file.
*/
static inline bool has_multiple_files(struct elf *elf)
{
return elf->num_files > 1;
}
static inline size_t elf_addr_size(struct elf *elf)
{
return elf->ehdr.e_ident[EI_CLASS] == ELFCLASS32 ? 4 : 8;
}
static inline size_t elf_rela_size(struct elf *elf)
{
return elf_addr_size(elf) == 4 ? sizeof(Elf32_Rela) : sizeof(Elf64_Rela);
}
static inline unsigned int elf_data_rela_type(struct elf *elf)
{
return elf_addr_size(elf) == 4 ? R_DATA32 : R_DATA64;
}
static inline unsigned int elf_text_rela_type(struct elf *elf)
{
return elf_addr_size(elf) == 4 ? R_TEXT32 : R_TEXT64;
}
static inline bool is_reloc_sec(struct section *sec)
{
return sec->sh.sh_type == SHT_RELA || sec->sh.sh_type == SHT_REL;
}
static inline bool sec_changed(struct section *sec)
{
return sec->_changed;
}
static inline void mark_sec_changed(struct elf *elf, struct section *sec,
bool changed)
{
sec->_changed = changed;
elf->changed |= changed;
}
static inline unsigned int sec_num_entries(struct section *sec)
{
return sec->sh.sh_size / sec->sh.sh_entsize;
}
static inline unsigned int reloc_idx(struct reloc *reloc)
{
return reloc - reloc->sec->relocs;
}
static inline void *reloc_rel(struct reloc *reloc)
{
struct section *rsec = reloc->sec;
return rsec->data->d_buf + (reloc_idx(reloc) * rsec->sh.sh_entsize);
}
static inline bool is_32bit_reloc(struct reloc *reloc)
{
/*
* Elf32_Rel: 8 bytes
* Elf32_Rela: 12 bytes
* Elf64_Rel: 16 bytes
* Elf64_Rela: 24 bytes
*/
return reloc->sec->sh.sh_entsize < 16;
}
#define __get_reloc_field(reloc, field) \
({ \
is_32bit_reloc(reloc) ? \
((Elf32_Rela *)reloc_rel(reloc))->field : \
((Elf64_Rela *)reloc_rel(reloc))->field; \
})
#define __set_reloc_field(reloc, field, val) \
({ \
if (is_32bit_reloc(reloc)) \
((Elf32_Rela *)reloc_rel(reloc))->field = val; \
else \
((Elf64_Rela *)reloc_rel(reloc))->field = val; \
})
static inline u64 reloc_offset(struct reloc *reloc)
{
return __get_reloc_field(reloc, r_offset);
}
static inline void set_reloc_offset(struct elf *elf, struct reloc *reloc, u64 offset)
{
__set_reloc_field(reloc, r_offset, offset);
mark_sec_changed(elf, reloc->sec, true);
}
static inline s64 reloc_addend(struct reloc *reloc)
{
return __get_reloc_field(reloc, r_addend);
}
static inline void set_reloc_addend(struct elf *elf, struct reloc *reloc, s64 addend)
{
__set_reloc_field(reloc, r_addend, addend);
mark_sec_changed(elf, reloc->sec, true);
}
static inline unsigned int reloc_sym(struct reloc *reloc)
{
u64 info = __get_reloc_field(reloc, r_info);
return is_32bit_reloc(reloc) ?
ELF32_R_SYM(info) :
ELF64_R_SYM(info);
}
static inline unsigned int reloc_type(struct reloc *reloc)
{
u64 info = __get_reloc_field(reloc, r_info);
return is_32bit_reloc(reloc) ?
ELF32_R_TYPE(info) :
ELF64_R_TYPE(info);
}
static inline void set_reloc_sym(struct elf *elf, struct reloc *reloc, unsigned int sym)
{
u64 info = is_32bit_reloc(reloc) ?
ELF32_R_INFO(sym, reloc_type(reloc)) :
ELF64_R_INFO(sym, reloc_type(reloc));
__set_reloc_field(reloc, r_info, info);
mark_sec_changed(elf, reloc->sec, true);
}
static inline void set_reloc_type(struct elf *elf, struct reloc *reloc, unsigned int type)
{
u64 info = is_32bit_reloc(reloc) ?
ELF32_R_INFO(reloc_sym(reloc), type) :
ELF64_R_INFO(reloc_sym(reloc), type);
__set_reloc_field(reloc, r_info, info);
mark_sec_changed(elf, reloc->sec, true);
}
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
#define sec_for_each_sym(sec, sym) \
list_for_each_entry(sym, &sec->symbol_list, list)
#define for_each_sym(file, sym) \
for (struct section *__sec, *__fake = (struct section *)1; \
__fake; __fake = NULL) \
for_each_sec(file, __sec) \
sec_for_each_sym(__sec, sym)
#define for_each_reloc(rsec, reloc) \
for (int __i = 0, __fake = 1; __fake; __fake = 0) \
for (reloc = rsec->relocs; \
__i < sec_num_entries(rsec); \
__i++, reloc++)
#define for_each_reloc_from(rsec, reloc) \
for (int __i = reloc_idx(reloc); \
__i < sec_num_entries(rsec); \
__i++, reloc++)
#define OFFSET_STRIDE_BITS 4
#define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS)
#define OFFSET_STRIDE_MASK (~(OFFSET_STRIDE - 1))
@ -135,66 +343,7 @@ static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)
static inline u32 reloc_hash(struct reloc *reloc)
{
return sec_offset_hash(reloc->sec, reloc->offset);
return sec_offset_hash(reloc->sec, reloc_offset(reloc));
}
/*
* Try to see if it's a whole archive (vmlinux.o or module).
*
* Note this will miss the case where a module only has one source file.
*/
static inline bool has_multiple_files(struct elf *elf)
{
return elf->num_files > 1;
}
static inline int elf_class_addrsize(struct elf *elf)
{
if (elf->ehdr.e_ident[EI_CLASS] == ELFCLASS32)
return sizeof(u32);
else
return sizeof(u64);
}
struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
struct symbol *elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size);
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
unsigned int type, struct symbol *sym, s64 addend);
int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int type,
struct section *insn_sec, unsigned long insn_off);
int elf_write_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len,
const char *insn);
int elf_write_reloc(struct elf *elf, struct reloc *reloc);
int elf_write(struct elf *elf);
void elf_close(struct elf *elf);
struct section *find_section_by_name(const struct elf *elf, const char *name);
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
struct symbol *find_symbol_by_name(const struct elf *elf, const char *name);
struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset);
int find_symbol_hole_containing(const struct section *sec, unsigned long offset);
struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset);
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len);
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
#define sec_for_each_sym(sec, sym) \
list_for_each_entry(sym, &sec->symbol_list, list)
#define for_each_sym(file, sym) \
for (struct section *__sec, *__fake = (struct section *)1; \
__fake; __fake = NULL) \
for_each_sec(file, __sec) \
sec_for_each_sym(__sec, sym)
#endif /* _OBJTOOL_ELF_H */


@ -55,15 +55,22 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define WARN_INSN(insn, format, ...) \
({ \
WARN_FUNC(format, insn->sec, insn->offset, ##__VA_ARGS__); \
struct instruction *_insn = (insn); \
if (!_insn->sym || !_insn->sym->warned) \
WARN_FUNC(format, _insn->sec, _insn->offset, \
##__VA_ARGS__); \
if (_insn->sym) \
_insn->sym->warned = 1; \
})
#define BT_FUNC(format, insn, ...) \
#define BT_INSN(insn, format, ...) \
({ \
if (opts.verbose || opts.backtrace) { \
struct instruction *_insn = (insn); \
char *_str = offstr(_insn->sec, _insn->offset); \
WARN(" %s: " format, _str, ##__VA_ARGS__); \
free(_str); \
} \
})
#define WARN_ELF(format, ...) \

tools/objtool/noreturns.h (new file, 46 lines)

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is a (sorted!) list of all known __noreturn functions in the kernel.
* It's needed for objtool to properly reverse-engineer the control flow graph.
*
* Yes, this is unfortunate. A better solution is in the works.
*/
NORETURN(__invalid_creds)
NORETURN(__kunit_abort)
NORETURN(__module_put_and_kthread_exit)
NORETURN(__reiserfs_panic)
NORETURN(__stack_chk_fail)
NORETURN(__ubsan_handle_builtin_unreachable)
NORETURN(arch_call_rest_init)
NORETURN(arch_cpu_idle_dead)
NORETURN(btrfs_assertfail)
NORETURN(cpu_bringup_and_idle)
NORETURN(cpu_startup_entry)
NORETURN(do_exit)
NORETURN(do_group_exit)
NORETURN(do_task_dead)
NORETURN(ex_handler_msr_mce)
NORETURN(fortify_panic)
NORETURN(hlt_play_dead)
NORETURN(hv_ghcb_terminate)
NORETURN(kthread_complete_and_exit)
NORETURN(kthread_exit)
NORETURN(kunit_try_catch_throw)
NORETURN(machine_real_restart)
NORETURN(make_task_dead)
NORETURN(mpt_halt_firmware)
NORETURN(nmi_panic_self_stop)
NORETURN(panic)
NORETURN(panic_smp_self_stop)
NORETURN(rest_init)
NORETURN(rewind_stack_and_make_dead)
NORETURN(sev_es_terminate)
NORETURN(snp_abort)
NORETURN(start_kernel)
NORETURN(stop_this_cpu)
NORETURN(usercopy_abort)
NORETURN(x86_64_start_kernel)
NORETURN(x86_64_start_reservations)
NORETURN(xen_cpu_bringup_again)
NORETURN(xen_start_kernel)
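
For context, a NORETURN(func) list like the one above is typically consumed as an X-macro: the includer defines NORETURN() to expand each entry into whatever form it needs. A self-contained sketch of that pattern follows; the table name and the inlined sample entries are illustrative, and objtool's actual consumer is not part of this diff:

    #include <stdio.h>

    /* Expand each list entry into a string literal followed by a comma. */
    #define NORETURN(func) #func,
    static const char * const global_noreturns[] = {
            /* objtool would pull these in via: #include "noreturns.h" */
            NORETURN(panic)
            NORETURN(do_exit)
    };
    #undef NORETURN

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < sizeof(global_noreturns) / sizeof(global_noreturns[0]); i++)
                    printf("known __noreturn function: %s\n", global_noreturns[i]);
            return 0;
    }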


@ -118,7 +118,7 @@ static int write_orc_entry(struct elf *elf, struct section *orc_sec,
orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
/* populate reloc for ip */
if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx,
insn_sec, insn_off))
return -1;
@ -237,12 +237,12 @@ int orc_create(struct objtool_file *file)
WARN("file already has .orc_unwind section, skipping");
return -1;
}
orc_sec = elf_create_section(file->elf, ".orc_unwind", 0,
orc_sec = elf_create_section(file->elf, ".orc_unwind",
sizeof(struct orc_entry), nr);
if (!orc_sec)
return -1;
sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr);
sec = elf_create_section_pair(file->elf, ".orc_unwind_ip", sizeof(int), nr, nr);
if (!sec)
return -1;


@ -62,7 +62,7 @@ static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
unsigned long *off)
{
*sec = reloc->sym->sec;
*off = reloc->sym->offset + reloc->addend;
*off = reloc->sym->offset + reloc_addend(reloc);
}
static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
@ -126,7 +126,7 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
sec, offset + entry->key);
return -1;
}
alt->key_addend = key_reloc->addend;
alt->key_addend = reloc_addend(key_reloc);
}
return 0;