 A single ELF format fix for a section flags mismatch bug that
 breaks kernel tooling such as kpatch-build.
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmDZYv4RHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1ipeBAAhJPS/kCQ17Y5zGyMB0/6yfCWIifODoS7
 9J+6/mqKHPDdV07yzPtOXuTTmpKV4OHPi8Yj8kaXs5L5fOmQ1uAwITwZNF5hU0a5
 CiFIsubUCJmglf9b6L9EH5pBEQ72Cq4u8zIhJ9LmZ4t625AHJAm2ikZgascc4U67
 RvVoGr5sYTo0YEsc1IDM1wUtnUhXBNjS1VwkXNnCFFTXYHju47MeY1sPHq2hvkzO
 iJGC9A+hxfM1eQt9/qC/2L/6F/XECN61gcR9Get8TkWeEGHmPG+FthmPLd4oO9Ho
 03J4JfMbmXumWosAeilYBNUkfii/M5Em78Wpv/cB94iSt67rq7Eb+8gm4D5svmfN
 l+utsPY/HYB+uWV0hy2cV/ORRiwcJnon54dEWL6912YkKz+OIb3DK/7l9ex5lW+D
 r3o8NP0s6S+RgUkOFxz5VaYK1giu6fiaFysWdKeflvwlvY/64owMepQ1QfPBbeB7
 3DTzvuYZ4Cb1x/vR6WBbFqGcuJKZ1CsZIBLCblveUs+G0wlu147K5E1qlXg/Wvq7
 5Vzznc4fmRng8np5hxAw8ieLkatWg7szyryUV/4H2Ubs/jWGcH628ZYbapaCb7EM
 Eson65xzbVfhnz16z8sN13XIF1lGe8sb0+qiFSclEfyDUnZDuhwMn6d9Ubqxrg5J
 uTULEzmY/rI=
 =MvPd
 -----END PGP SIGNATURE-----
mergetag object d33b9035e1
 type commit
 tag objtool-core-2021-06-28
 tagger Ingo Molnar <mingo@kernel.org> 1624859477 +0200
 
 The biggest change in this cycle is the new code to handle
 and rewrite variable sized jump labels - which results in
 slightly tighter code generation in hot paths, through the
 use of short(er) NOPs.
 
 Also a number of cleanups and fixes, and a change to the
 generic include/linux/compiler.h to handle a s390 GCC quirk.
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmDZZGcRHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1goYg/7BxUIJXP0F5wbrMbAvJIDRgR/j3TA+ztk
 uNU1yabBGluMxCqJ87HadJ+A5d010G+GRUn/birVr7w1UuwWv8HOda78dnyG7tme
 xm78/1FlOnstuOTQxhK6rjbb2cp+QOmdsAQkq1TF4SOxArBQiwtjiOvytHjb5yNx
 7LrlbtuZ7Dtc0qd2evkG4ma4QkGoDhBS1dRogrItc27ZLuFIQoNnEd2K2QNMgczw
 a/Jx8fgNmdoJSq+vkBn9TnS/cJYUW/PAlPNtO3ac8yE857aDIVnjXFRzveAP/nTh
 rwFD6aCGnJAqyqP7A8ElNjySos5O+ebYApxe7rEx0TNLbrc55qSP9lpdIO+vgytV
 Xzy4O7z6o+lailQ4EoF8Qf+rlPeue0kLF23SsNbZY1uT0vjX1Uv70xgKbkuyPygp
 GNXAy6dOXK0AfaZYL/Wa50yVnJnkYDjes/hHr+HEam5Oad566pqIyQNP8yWSPqaf
 KHkL//1pb5C2RKwot4IYv/ftHfZB5QftoFq6bhGBc1GXUd/FiqivvGHPW/6g7rxi
 ZIrXs+Fqm/5KP9mssNONfyz5XEvbcUTD1CbeqX9eyVbiYZbLp1oWSgtogiRW9ya+
 HR7t0Dt/UFzFWbilb6EZff/Hdr1NZBZLdrfpvVDoMf5tR9J0BIOyjddTu89g/FIO
 KcfJ5yyjJBU=
 =+HAB
 -----END PGP SIGNATURE-----

Merge tags 'objtool-urgent-2021-06-28' and 'objtool-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool fix and updates from Ingo Molnar:
 "An ELF format fix for a section flags mismatch bug that breaks kernel
  tooling such as kpatch-build.

  The biggest change in this cycle is the new code to handle and rewrite
  variable sized jump labels - which results in slightly tighter code
  generation in hot paths, through the use of short(er) NOPs.

  Also a number of cleanups and fixes, and a change to the generic
  include/linux/compiler.h to handle a s390 GCC quirk"

* tag 'objtool-urgent-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Don't make .altinstructions writable

* tag 'objtool-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Improve reloc hash size guestimate
  instrumentation.h: Avoid using inline asm operand modifiers
  compiler.h: Avoid using inline asm operand modifiers
  kbuild: Fix objtool dependency for 'OBJECT_FILES_NON_STANDARD_<obj> := n'
  objtool: Reflow handle_jump_alt()
  jump_label/x86: Remove unused JUMP_LABEL_NOP_SIZE
  jump_label, x86: Allow short NOPs
  objtool: Provide stats for jump_labels
  objtool: Rewrite jump_label instructions
  objtool: Decode jump_entry::key addend
  jump_label, x86: Emit short JMP
  jump_label: Free jump_entry::key bit1 for build use
  jump_label, x86: Add variable length patching support
  jump_label, x86: Introduce jump_entry_size()
  jump_label, x86: Improve error when we fail expected text
  jump_label, x86: Factor out the __jump_table generation
  jump_label, x86: Strip ASM jump_label support
  x86, objtool: Dont exclude arch/x86/realmode/
  objtool: Rewrite hashtable sizing
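
For illustration of the variable-size jump label work summarized above, a minimal user-space sketch (hypothetical helper names, not kernel API) of the underlying size decision: a jump-label site only needs the 5-byte JMP/NOP form when its target is out of reach of a signed 8-bit displacement; otherwise a 2-byte form is enough.

#include <stdint.h>

#define JMP8_SIZE  2	/* EB rel8  */
#define JMP32_SIZE 5	/* E9 rel32 */

/*
 * Pick the encoding size for a jump from 'site' to 'target'; the rel8
 * displacement is measured from the end of the 2-byte instruction.
 * Hypothetical sketch only: in the kernel the compiler emits the form
 * and arch_jump_entry_size() recovers the size afterwards.
 */
static int jump_site_size(uintptr_t site, uintptr_t target)
{
	intptr_t rel = (intptr_t)target - (intptr_t)(site + JMP8_SIZE);

	return (rel >= INT8_MIN && rel <= INT8_MAX) ? JMP8_SIZE : JMP32_SIZE;
}
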
Linus Torvalds 2021-06-28 11:35:55 -07:00
commit b89c07dea1
16 changed files with 268 additions and 155 deletions


@@ -4,8 +4,6 @@
#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE 5
#include <asm/asm.h>
#include <asm/nops.h>
@@ -14,15 +12,35 @@
#include <linux/stringify.h>
#include <linux/types.h>
#define JUMP_TABLE_ENTRY \
".pushsection __jump_table, \"aw\" \n\t" \
_ASM_ALIGN "\n\t" \
".long 1b - . \n\t" \
".long %l[l_yes] - . \n\t" \
_ASM_PTR "%c0 + %c1 - .\n\t" \
".popsection \n\t"
#ifdef CONFIG_STACK_VALIDATION
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
"jmp %l[l_yes] # objtool NOPs this \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (2 | branch) : : l_yes);
return false;
l_yes:
return true;
}
#else
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -30,16 +48,13 @@ l_yes:
return true;
}
#endif /* STACK_VALIDATION */
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
"2:\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
"jmp %l[l_yes]\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -47,41 +62,7 @@ l_yes:
return true;
}
#else /* __ASSEMBLY__ */
.macro STATIC_JUMP_IF_TRUE target, key, def
.Lstatic_jump_\@:
.if \def
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.else
.byte BYTES_NOP5
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key - .
.popsection
.endm
.macro STATIC_JUMP_IF_FALSE target, key, def
.Lstatic_jump_\@:
.if \def
.byte BYTES_NOP5
.else
/* Equivalent to "jmp.d32 \target" */
.byte 0xe9
.long \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
.endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
.long .Lstatic_jump_\@ - ., \target - .
_ASM_PTR \key + 1 - .
.popsection
.endm
extern int arch_jump_entry_size(struct jump_entry *entry);
#endif /* __ASSEMBLY__ */
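
As context for the jump_label.h changes above, a stand-alone user-space sketch of the asm-goto pattern that arch_static_branch() is built on (section and symbol names here are made up): the inline asm emits a 5-byte NOP at the branch site and records the site and target, PC-relative, in a private jump-table section that a patching tool can later walk.

#include <stdbool.h>
#include <stdio.h>

static bool my_static_branch(void)
{
	asm goto("1: .byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"	/* 5-byte NOP */
		 ".pushsection my_jump_table, \"aw\"\n\t"
		 ".long 1b - ., %l[l_yes] - .\n\t"		/* site, target */
		 ".popsection\n\t"
		 : : : : l_yes);
	return false;
l_yes:
	return true;
}

int main(void)
{
	/* Always false until something patches the NOP into a JMP. */
	printf("branch: %d\n", my_static_branch());
	return 0;
}
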


@@ -15,50 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
#include <asm/insn.h>
static void bug_at(const void *ip, int line)
int arch_jump_entry_size(struct jump_entry *entry)
{
/*
* The location is not an op that we were expecting.
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
BUG();
struct insn insn = {};
insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
BUG_ON(insn.length != 2 && insn.length != 5);
return insn.length;
}
static const void *
__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
struct jump_label_patch {
const void *code;
int size;
};
static struct jump_label_patch
__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
const void *expect, *code;
const void *expect, *code, *nop;
const void *addr, *dest;
int line;
int size;
addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);
code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
size = arch_jump_entry_size(entry);
switch (size) {
case JMP8_INSN_SIZE:
code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
nop = x86_nops[size];
break;
if (type == JUMP_LABEL_JMP) {
expect = x86_nops[5]; line = __LINE__;
} else {
expect = code; line = __LINE__;
case JMP32_INSN_SIZE:
code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
nop = x86_nops[size];
break;
default: BUG();
}
if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
bug_at(addr, line);
if (type == JUMP_LABEL_JMP)
expect = nop;
else
expect = code;
if (memcmp(addr, expect, size)) {
/*
* The location is not an op that we were expecting.
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
addr, addr, addr, expect, size, type);
BUG();
}
if (type == JUMP_LABEL_NOP)
code = x86_nops[5];
code = nop;
return code;
return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
const void *opcode = __jump_label_set_jump_code(entry, type);
const struct jump_label_patch jlp = __jump_label_patch(entry, type);
/*
* As long as only a single processor is running and the code is still
@@ -72,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
text_poke_early((void *)jump_entry_code(entry), opcode,
JUMP_LABEL_NOP_SIZE);
text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}
text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -98,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
const void *opcode;
struct jump_label_patch jlp;
if (system_state == SYSTEM_BOOTING) {
/*
@@ -109,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
mutex_lock(&text_mutex);
opcode = __jump_label_set_jump_code(entry, type);
text_poke_queue((void *)jump_entry_code(entry),
opcode, JUMP_LABEL_NOP_SIZE, NULL);
jlp = __jump_label_patch(entry, type);
text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
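
The arch_jump_entry_size() change above relies on the in-kernel instruction decoder. As a rough user-space illustration (the opcode table below covers only the common cases and is an assumption, not the decoder), the 2-byte vs 5-byte question can be answered from the first bytes at the site:

#include <stdint.h>

/* x86 encodings involved: JMP rel8 = EB xx, JMP rel32 = E9 xx xx xx xx,
 * 2-byte NOP = 66 90, 5-byte NOP = 0F 1F 44 00 00. */
static int jump_label_site_size(const uint8_t *ip)
{
	switch (ip[0]) {
	case 0xeb:					/* JMP rel8 */
		return 2;
	case 0xe9:					/* JMP rel32 */
		return 5;
	case 0x66:					/* 66 90: osp NOP */
		return ip[1] == 0x90 ? 2 : -1;
	case 0x0f:					/* 0F 1F /0: NOPL */
		return ip[1] == 0x1f ? 5 : -1;
	default:
		return -1;				/* unexpected, caller should BUG() */
	}
}
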


@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm


@@ -115,18 +115,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
asm volatile("%c0:\n\t" \
#define __stringify_label(n) #n
#define __annotate_reachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify_label(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define annotate_unreachable() ({ \
asm volatile("%c0:\n\t" \
#define annotate_reachable() __annotate_reachable(__COUNTER__)
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify_label(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
#define ASM_UNREACHABLE \
"999:\n\t" \
".pushsection .discard.unreachable\n\t" \


@@ -4,13 +4,16 @@
#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
#include <linux/stringify.h>
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({ \
asm volatile("%c0: nop\n\t" \
#define __instrumentation_begin(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,12 +46,13 @@
* To avoid this, have _end() be a NOP instruction, this ensures it will be
* part of the condition block and does not escape.
*/
#define instrumentation_end() ({ \
asm volatile("%c0: nop\n\t" \
#define __instrumentation_end(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
".long %c0b - .\n\t" \
".popsection\n\t" : : "i" (__COUNTER__)); \
".long " __stringify(c) "b - .\n\t" \
".popsection\n\t"); \
})
#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
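
Both the compiler.h and instrumentation.h rewrites above replace the "%c0" asm operand modifier (which the s390 GCC mis-emits) with direct stringification of __COUNTER__. The non-obvious part is the extra macro level; a small stand-alone demo with made-up macro names:

#include <stdio.h>

#define __stringify_label(n) #n

/* Wrong: the argument is an operand of '#', so it is never expanded. */
#define label_direct()		__stringify_label(__COUNTER__)

/* Right: the extra level expands __COUNTER__ to a number first. */
#define __label_indirect(c)	__stringify_label(c)
#define label_indirect()	__label_indirect(__COUNTER__)

int main(void)
{
	/* Prints: __COUNTER__ 0 1 (numbers depend on earlier __COUNTER__ uses). */
	printf("%s %s %s\n", label_direct(), label_indirect(), label_indirect());
	return 0;
}
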


@@ -171,9 +171,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
static inline void jump_entry_set_init(struct jump_entry *entry)
static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
entry->key |= 2;
if (set)
entry->key |= 2;
else
entry->key &= ~2;
}
static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
return JUMP_LABEL_NOP_SIZE;
#else
return arch_jump_entry_size(entry);
#endif
}
#endif


@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (jump_entry_code(entry) <= (unsigned long)end &&
jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
return 1;
return 0;
@@ -483,13 +483,14 @@ void __init jump_label_init(void)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;
/* rewrite NOPs */
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
if (init_section_contains((void *)jump_entry_code(iter), 1))
jump_entry_set_init(iter);
in_init = init_section_contains((void *)jump_entry_code(iter), 1);
jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)
@@ -634,9 +635,10 @@ static int jump_label_add_module(struct module *mod)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
bool in_init;
if (within_module_init(jump_entry_code(iter), mod))
jump_entry_set_init(iter);
in_init = within_module_init(jump_entry_code(iter), mod);
jump_entry_set_init(iter, in_init);
iterk = jump_entry_key(iter);
if (iterk == key)


@@ -268,7 +268,8 @@ define rule_as_o_S
endef
# Built-in and composite module parts
$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
.SECONDEXPANSION:
$(obj)/%.o: $(src)/%.c $(recordmcount_source) $$(objtool_dep) FORCE
$(call if_changed_rule,cc_o_c)
$(call cmd,force_checksrc)
@@ -349,7 +350,7 @@ cmd_modversions_S = \
fi
endif
$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
$(obj)/%.o: $(src)/%.S $$(objtool_dep) FORCE
$(call if_changed_rule,as_o_S)
targets += $(filter-out $(subdir-builtin), $(real-obj-y))


@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
sec = find_section_by_name(elf, ".altinstructions");
if (!sec) {
sec = elf_create_section(elf, ".altinstructions",
SHF_WRITE, size, 0);
SHF_ALLOC, size, 0);
if (!sec) {
WARN_ELF("elf_create_section");
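
The one-line change above is the 'objtool-urgent' fix: objtool was creating .altinstructions with SHF_WRITE set, and the resulting section flags mismatch breaks tooling such as kpatch-build. One way to see the symptom is to dump the flags with libelf; a sketch (the input file name is just an example):

#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "vmlinux.o";	/* example input */
	int fd = open(path, O_RDONLY);
	size_t shstrndx;
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	Elf *elf;

	elf_version(EV_CURRENT);
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getshdrstrndx(elf, &shstrndx))
		return 1;

	while ((scn = elf_nextscn(elf, scn))) {
		const char *name;

		if (!gelf_getshdr(scn, &shdr))
			continue;
		name = elf_strptr(elf, shstrndx, shdr.sh_name);
		if (name && !strcmp(name, ".altinstructions"))
			printf("%s: SHF_ALLOC=%d SHF_WRITE=%d\n", name,
			       !!(shdr.sh_flags & SHF_ALLOC),
			       !!(shdr.sh_flags & SHF_WRITE));
	}

	elf_end(elf);
	close(fd);
	return 0;
}
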


@@ -9,6 +9,7 @@
#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
#define JUMP_NEW_OFFSET 4
#define JUMP_KEY_OFFSET 8
#define ALT_ENTRY_SIZE 12
#define ALT_ORIG_OFFSET 0


@@ -1225,15 +1225,41 @@ static int handle_jump_alt(struct objtool_file *file,
struct instruction *orig_insn,
struct instruction **new_insn)
{
if (orig_insn->type == INSN_NOP)
return 0;
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
orig_insn->type != INSN_NOP) {
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
WARN_FUNC("unsupported instruction at jump label",
orig_insn->sec, orig_insn->offset);
return -1;
}
if (special_alt->key_addend & 2) {
struct reloc *reloc = insn_reloc(file, orig_insn);
if (reloc) {
reloc->type = R_NONE;
elf_write_reloc(file->elf, reloc);
}
elf_write_insn(file->elf, orig_insn->sec,
orig_insn->offset, orig_insn->len,
arch_nop_insn(orig_insn->len));
orig_insn->type = INSN_NOP;
}
if (orig_insn->type == INSN_NOP) {
if (orig_insn->len == 2)
file->jl_nop_short++;
else
file->jl_nop_long++;
return 0;
}
if (orig_insn->len == 2)
file->jl_short++;
else
file->jl_long++;
*new_insn = list_next_entry(orig_insn, list);
return 0;
}
@@ -1314,6 +1340,12 @@ static int add_special_section_alts(struct objtool_file *file)
free(special_alt);
}
if (stats) {
printf("jl\\\tNOP\tJMP\n");
printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
}
out:
return ret;
}
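
The handle_jump_alt() rework above keys off bit 1 of the jump_entry key addend: the x86 arch_static_branch() now emits a real JMP plus that bit, and objtool rewrites such sites into right-sized NOPs at build time. A schematic of the bit usage (field and helper names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct demo_jump_entry {
	int64_t key;	/* static_key reference with two low flag bits */
};

/* bit 0: default branch direction selected by the static_branch macro */
static bool demo_branch_default(const struct demo_jump_entry *e)
{
	return e->key & 1;
}

/*
 * bit 1: at build time, "objtool should rewrite this JMP into a NOP";
 * the runtime then reclaims the bit as the "entry is in __init text"
 * flag, which is why jump_entry_set_init() now takes an explicit bool.
 */
static bool demo_nop_at_build(const struct demo_jump_entry *e)
{
	return e->key & 2;
}
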


@@ -9,6 +9,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -27,21 +28,27 @@ static inline u32 str_hash(const char *str)
return jhash(str, strlen(str), 0);
}
static inline int elf_hash_bits(void)
{
return vmlinux ? ELF_HASH_BITS : 16;
}
#define __elf_table(name) (elf->name##_hash)
#define __elf_bits(name) (elf->name##_bits)
#define elf_hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, elf_hash_bits())])
#define elf_hash_add(name, node, key) \
hlist_add_head(node, &__elf_table(name)[hash_min(key, __elf_bits(name))])
static void elf_hash_init(struct hlist_head *table)
{
__hash_init(table, 1U << elf_hash_bits());
}
#define elf_hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &__elf_table(name)[hash_min(key, __elf_bits(name))], member)
#define elf_hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, elf_hash_bits())], member)
#define elf_alloc_hash(name, size) \
({ \
__elf_bits(name) = max(10, ilog2(size)); \
__elf_table(name) = mmap(NULL, sizeof(struct hlist_head) << __elf_bits(name), \
PROT_READ|PROT_WRITE, \
MAP_PRIVATE|MAP_ANON, -1, 0); \
if (__elf_table(name) == (void *)-1L) { \
WARN("mmap fail " #name); \
__elf_table(name) = NULL; \
} \
__elf_table(name); \
})
static bool symbol_to_offset(struct rb_node *a, const struct rb_node *b)
{
@@ -80,9 +87,10 @@ struct section *find_section_by_name(const struct elf *elf, const char *name)
{
struct section *sec;
elf_hash_for_each_possible(elf->section_name_hash, sec, name_hash, str_hash(name))
elf_hash_for_each_possible(section_name, sec, name_hash, str_hash(name)) {
if (!strcmp(sec->name, name))
return sec;
}
return NULL;
}
@@ -92,9 +100,10 @@ static struct section *find_section_by_index(struct elf *elf,
{
struct section *sec;
elf_hash_for_each_possible(elf->section_hash, sec, hash, idx)
elf_hash_for_each_possible(section, sec, hash, idx) {
if (sec->idx == idx)
return sec;
}
return NULL;
}
@@ -103,9 +112,10 @@ static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
struct symbol *sym;
elf_hash_for_each_possible(elf->symbol_hash, sym, hash, idx)
elf_hash_for_each_possible(symbol, sym, hash, idx) {
if (sym->idx == idx)
return sym;
}
return NULL;
}
@@ -170,9 +180,10 @@ struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
struct symbol *sym;
elf_hash_for_each_possible(elf->symbol_name_hash, sym, name_hash, str_hash(name))
elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
if (!strcmp(sym->name, name))
return sym;
}
return NULL;
}
@@ -189,8 +200,8 @@ struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *se
sec = sec->reloc;
for_offset_range(o, offset, offset + len) {
elf_hash_for_each_possible(elf->reloc_hash, reloc, hash,
sec_offset_hash(sec, o)) {
elf_hash_for_each_possible(reloc, reloc, hash,
sec_offset_hash(sec, o)) {
if (reloc->sec != sec)
continue;
@@ -228,6 +239,10 @@ static int read_sections(struct elf *elf)
return -1;
}
if (!elf_alloc_hash(section, sections_nr) ||
!elf_alloc_hash(section_name, sections_nr))
return -1;
for (i = 0; i < sections_nr; i++) {
sec = malloc(sizeof(*sec));
if (!sec) {
@@ -273,13 +288,18 @@
}
sec->len = sec->sh.sh_size;
if (sec->sh.sh_flags & SHF_EXECINSTR)
elf->text_size += sec->len;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
}
if (stats)
if (stats) {
printf("nr_sections: %lu\n", (unsigned long)sections_nr);
printf("section_bits: %d\n", elf->section_bits);
}
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
@@ -308,8 +328,8 @@ static void elf_add_symbol(struct elf *elf, struct symbol *sym)
else
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
elf_hash_add(symbol, &sym->hash, sym->idx);
elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
/*
* Don't store empty STT_NOTYPE symbols in the rbtree. They
@@ -329,19 +349,25 @@ static int read_symbols(struct elf *elf)
Elf32_Word shndx;
symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
if (symtab) {
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
if (symtab_shndx)
shndx_data = symtab_shndx->data;
symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
} else {
/*
* A missing symbol table is actually possible if it's an empty
* .o file. This can happen for thunk_64.o.
* .o file. This can happen for thunk_64.o. Make sure to at
* least allocate the symbol hash tables so we can do symbol
* lookups without crashing.
*/
return 0;
symbols_nr = 0;
}
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
if (symtab_shndx)
shndx_data = symtab_shndx->data;
symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
if (!elf_alloc_hash(symbol, symbols_nr) ||
!elf_alloc_hash(symbol_name, symbols_nr))
return -1;
for (i = 0; i < symbols_nr; i++) {
sym = malloc(sizeof(*sym));
@@ -389,8 +415,10 @@ static int read_symbols(struct elf *elf)
elf_add_symbol(elf, sym);
}
if (stats)
if (stats) {
printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);
printf("symbol_bits: %d\n", elf->symbol_bits);
}
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
@@ -479,7 +507,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
reloc->addend = addend;
list_add_tail(&reloc->list, &sec->reloc->reloc_list);
elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
sec->reloc->changed = true;
@@ -556,6 +584,9 @@ static int read_relocs(struct elf *elf)
unsigned int symndx;
unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;
if (!elf_alloc_hash(reloc, elf->text_size / 16))
return -1;
list_for_each_entry(sec, &elf->sections, list) {
if ((sec->sh.sh_type != SHT_RELA) &&
(sec->sh.sh_type != SHT_REL))
@@ -600,7 +631,7 @@ static int read_relocs(struct elf *elf)
}
list_add_tail(&reloc->list, &sec->reloc_list);
elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc));
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
nr_reloc++;
}
@@ -611,6 +642,7 @@ static int read_relocs(struct elf *elf)
if (stats) {
printf("max_reloc: %lu\n", max_reloc);
printf("tot_reloc: %lu\n", tot_reloc);
printf("reloc_bits: %d\n", elf->reloc_bits);
}
return 0;
@@ -632,12 +664,6 @@ struct elf *elf_open_read(const char *name, int flags)
INIT_LIST_HEAD(&elf->sections);
elf_hash_init(elf->symbol_hash);
elf_hash_init(elf->symbol_name_hash);
elf_hash_init(elf->section_hash);
elf_hash_init(elf->section_name_hash);
elf_hash_init(elf->reloc_hash);
elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
@@ -874,8 +900,8 @@ struct section *elf_create_section(struct elf *elf, const char *name,
return NULL;
list_add_tail(&sec->list, &elf->sections);
elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
elf_hash_add(section, &sec->hash, sec->idx);
elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
elf->changed = true;
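
The elf.c changes above replace the fixed ELF_HASH_BITS tables with per-table mmap()ed tables sized from the object being read (section and symbol counts, and a reloc count guessed from the executable bytes). The sizing rule is roughly the following (sketch with assumed names):

#include <stddef.h>

/* floor(log2(n)), 0 for n == 0 */
static unsigned int demo_ilog2(size_t n)
{
	unsigned int bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}

/* max(10, ilog2(expected)) index bits, i.e. at least 1024 buckets */
static unsigned int demo_hash_bits(size_t expected)
{
	unsigned int bits = demo_ilog2(expected);

	return bits < 10 ? 10 : bits;
}

/* e.g. the reloc table guesses one reloc per 16 bytes of SHF_EXECINSTR data:
 *	reloc_bits = demo_hash_bits(text_size / 16);
 */
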


@@ -83,12 +83,20 @@ struct elf {
int fd;
bool changed;
char *name;
unsigned int text_size;
struct list_head sections;
DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
DECLARE_HASHTABLE(symbol_name_hash, ELF_HASH_BITS);
DECLARE_HASHTABLE(section_hash, ELF_HASH_BITS);
DECLARE_HASHTABLE(section_name_hash, ELF_HASH_BITS);
DECLARE_HASHTABLE(reloc_hash, ELF_HASH_BITS);
int symbol_bits;
int symbol_name_bits;
int section_bits;
int section_name_bits;
int reloc_bits;
struct hlist_head *symbol_hash;
struct hlist_head *symbol_name_hash;
struct hlist_head *section_hash;
struct hlist_head *section_name_hash;
struct hlist_head *reloc_hash;
};
#define OFFSET_STRIDE_BITS 4


@@ -22,6 +22,9 @@ struct objtool_file {
struct list_head static_call_list;
struct list_head mcount_loc_list;
bool ignore_unreachables, c_file, hints, rodata;
unsigned long jl_short, jl_long;
unsigned long jl_nop_short, jl_nop_long;
};
struct objtool_file *objtool_open_read(const char *_objname);


@@ -27,6 +27,7 @@ struct special_alt {
unsigned long new_off;
unsigned int orig_len, new_len; /* group only */
u8 key_addend;
};
int special_get_alts(struct elf *elf, struct list_head *alts);


@@ -23,6 +23,7 @@ struct special_entry {
unsigned char size, orig, new;
unsigned char orig_len, new_len; /* group only */
unsigned char feature; /* ALTERNATIVE macro CPU feature */
unsigned char key; /* jump_label key */
};
struct special_entry entries[] = {
@@ -42,6 +43,7 @@ struct special_entry entries[] = {
.size = JUMP_ENTRY_SIZE,
.orig = JUMP_ORIG_OFFSET,
.new = JUMP_NEW_OFFSET,
.key = JUMP_KEY_OFFSET,
},
{
.sec = "__ex_table",
@@ -122,6 +124,18 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
alt->new_off -= 0x7ffffff0;
}
if (entry->key) {
struct reloc *key_reloc;
key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
if (!key_reloc) {
WARN_FUNC("can't find key reloc",
sec, offset + entry->key);
return -1;
}
alt->key_addend = key_reloc->addend;
}
return 0;
}