commit 2671c3e4fe
Unfortunately, we can only do this if HAVE_JUMP_LABEL. In principle, we
could do some serious surgery on the core jump label infrastructure to
keep the patch infrastructure available on x86 on all builds, but that's
probably not worth it.

Implementing the macros using a conditional branch as a fallback seems
like a bad idea: we'd have to clobber flags.

This limitation can't cause silent failures -- trying to include
asm/jump_label.h at all on a non-HAVE_JUMP_LABEL kernel will error out.
The macro's users are responsible for handling this issue themselves.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/63aa45c4b692e8469e1876d6ccbb5da707972990.1447361906.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
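Below is the asm/jump_label.h the commit message describes. On the C side, its
arch_static_branch()/arch_static_branch_jump() helpers are normally reached
through the generic static-key API from <linux/jump_label.h> rather than
called directly. The fragment that follows is a minimal sketch of that usage;
example_key, example_hot_path() and example_slow_path() are illustrative
names, not anything in the kernel tree:

#include <linux/jump_label.h>

/* Illustrative only: the key and functions below are made-up names. */
static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_slow_path(void)
{
        /* rarely-enabled work would live here */
}

void example_hot_path(void)
{
        /*
         * While example_key is false this test compiles down to the 5-byte
         * NOP emitted by arch_static_branch(); calling
         * static_branch_enable(&example_key) later patches that NOP into a
         * jump to the out-of-line branch.
         */
        if (static_branch_unlikely(&example_key))
                example_slow_path();
}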
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#ifndef HAVE_JUMP_LABEL
/*
 * For better or for worse, if jump labels (the gcc extension) are missing,
 * then the entire static branch patching infrastructure is compiled out.
 * If that happens, the code in here will malfunction. Raise a compiler
 * error instead.
 *
 * In theory, jump labels and the static branch patching infrastructure
 * could be decoupled to fix this.
 */
#error asm/jump_label.h included on a non-jump-label kernel
#endif

#define JUMP_LABEL_NOP_SIZE 5

#ifdef CONFIG_X86_64
# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
#else
# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
#endif

#include <asm/asm.h>
#include <asm/nops.h>

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
        asm_volatile_goto("1:"
                ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                ".pushsection __jump_table, \"aw\" \n\t"
                _ASM_ALIGN "\n\t"
                _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
                ".popsection \n\t"
                : : "i" (key), "i" (branch) : : l_yes);

        return false;
l_yes:
        return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
        asm_volatile_goto("1:"
                ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
                "2:\n\t"
                ".pushsection __jump_table, \"aw\" \n\t"
                _ASM_ALIGN "\n\t"
                _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
                ".popsection \n\t"
                : : "i" (key), "i" (branch) : : l_yes);

        return false;
l_yes:
        return true;
}

#ifdef CONFIG_X86_64
typedef u64 jump_label_t;
#else
typedef u32 jump_label_t;
#endif

struct jump_entry {
        jump_label_t code;
        jump_label_t target;
        jump_label_t key;
};

#else /* __ASSEMBLY__ */

.macro STATIC_JUMP_IF_TRUE target, key, def
.Lstatic_jump_\@:
        .if \def
        /* Equivalent to "jmp.d32 \target" */
        .byte           0xe9
        .long           \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
        .else
        .byte           STATIC_KEY_INIT_NOP
        .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
        _ASM_PTR        .Lstatic_jump_\@, \target, \key
        .popsection
.endm

.macro STATIC_JUMP_IF_FALSE target, key, def
.Lstatic_jump_\@:
        .if \def
        .byte           STATIC_KEY_INIT_NOP
        .else
        /* Equivalent to "jmp.d32 \target" */
        .byte           0xe9
        .long           \target - .Lstatic_jump_after_\@
.Lstatic_jump_after_\@:
        .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
        _ASM_PTR        .Lstatic_jump_\@, \target, \key + 1
        .popsection
.endm

#endif /* __ASSEMBLY__ */

#endif
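On the assembly side, a minimal sketch of how the STATIC_JUMP_IF_TRUE macro
above might be used from a .S file; the key name, labels, and surrounding
instructions are illustrative assumptions, not taken from any in-tree user:

/* Illustrative .S fragment: example_key and the labels are made-up names. */
#include <asm/jump_label.h>

        /*
         * def=0 says example_key is statically initialized to false, so the
         * macro emits the 5-byte NOP here; enabling the key at runtime
         * patches it into a jmp to .Lexample_skip.
         */
        STATIC_JUMP_IF_TRUE .Lexample_skip, example_key, def=0
        /* ... fast-path instructions, executed while the key is false ... */
.Lexample_skip:
        ret

STATIC_JUMP_IF_FALSE is the mirror image (jump taken while the key is false);
its __jump_table entry records \key + 1 so the patching code knows the branch
sense is inverted.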