x86, cpu: Clean up and unify the NOP selection infrastructure
Clean up and unify the NOP selection infrastructure:

 - Make the atomic 5-byte NOP a part of the selection system.
 - Pick NOPs once during early boot and then be done with it.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Link: http://lkml.kernel.org/r/1303166160-10315-3-git-send-email-hpa@linux.intel.com
This commit is contained in:
parent b1e7734f02
commit dc326fca2b
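For orientation before the diff: after this change there is a single boot-time selection step, arch_init_ideal_nops() (called from setup_arch()), and one global table, ideal_nops, indexed by NOP length, plus the extra index NOP_ATOMIC5 for the 5-byte single-instruction NOP used by ftrace and jump labels. A minimal sketch of how a caller is expected to use the table follows; the helper name fill_nops and its buffer are illustrative only, not part of the patch:

	/* Illustrative sketch: pad a code buffer with the NOPs picked at boot. */
	static void fill_nops(unsigned char *buf, unsigned int len)
	{
		while (len > 0) {
			/* ideal_nops[n] is an n-byte NOP sequence, n up to ASM_NOP_MAX */
			unsigned int noplen = len > ASM_NOP_MAX ? ASM_NOP_MAX : len;

			memcpy(buf, ideal_nops[noplen], noplen);
			buf += noplen;
			len -= noplen;
		}
	}

	/* Patch sites that must be rewritten atomically use the single-instruction entry: */
	/* memcpy(site, ideal_nops[NOP_ATOMIC5], 5); */

This mirrors how add_nops(), ftrace_nop_replace() and the jump-label code consume the table in the patch below.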
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -191,12 +191,4 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
 extern void text_poke_smp_batch(struct text_poke_param *params, int n);
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-#define IDEAL_NOP_SIZE_5 5
-extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
-extern void arch_init_ideal_nop5(void);
-#else
-static inline void arch_init_ideal_nop5(void) {}
-#endif
-
 #endif /* _ASM_X86_ALTERNATIVE_H */
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -1,7 +1,13 @@
 #ifndef _ASM_X86_NOPS_H
 #define _ASM_X86_NOPS_H
 
-/* Define nops for use with alternative() */
+/*
+ * Define nops for use with alternative() and for tracing.
+ *
+ * *_NOP5_ATOMIC must be a single instruction.
+ */
+
+#define NOP_DS_PREFIX 0x3e
 
 /* generic versions from gas
    1: nop
@@ -13,14 +19,15 @@
    6: leal 0x00000000(%esi),%esi
    7: leal 0x00000000(,%esi,1),%esi
 */
-#define GENERIC_NOP1 ".byte 0x90\n"
-#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
+#define GENERIC_NOP1 0x90
+#define GENERIC_NOP2 0x89,0xf6
+#define GENERIC_NOP3 0x8d,0x76,0x00
+#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
+#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
+#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
+#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4
 
 /* Opteron 64bit nops
    1: nop
@@ -29,13 +36,14 @@
    4: osp osp osp nop
 */
 #define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2 ".byte 0x66,0x90\n"
-#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
-#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
-#define K8_NOP5 K8_NOP3 K8_NOP2
-#define K8_NOP6 K8_NOP3 K8_NOP3
-#define K8_NOP7 K8_NOP4 K8_NOP3
-#define K8_NOP8 K8_NOP4 K8_NOP4
+#define K8_NOP2 0x66,K8_NOP1
+#define K8_NOP3 0x66,K8_NOP2
+#define K8_NOP4 0x66,K8_NOP3
+#define K8_NOP5 K8_NOP3,K8_NOP2
+#define K8_NOP6 K8_NOP3,K8_NOP3
+#define K8_NOP7 K8_NOP4,K8_NOP3
+#define K8_NOP8 K8_NOP4,K8_NOP4
+#define K8_NOP5_ATOMIC 0x66,K8_NOP4
 
 /* K7 nops
    uses eax dependencies (arbitrary choice)
@@ -47,13 +55,14 @@
    7: leal 0x00000000(,%eax,1),%eax
 */
 #define K7_NOP1 GENERIC_NOP1
-#define K7_NOP2 ".byte 0x8b,0xc0\n"
-#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
-#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
-#define K7_NOP5 K7_NOP4 ASM_NOP1
-#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8 K7_NOP7 ASM_NOP1
+#define K7_NOP2 0x8b,0xc0
+#define K7_NOP3 0x8d,0x04,0x20
+#define K7_NOP4 0x8d,0x44,0x20,0x00
+#define K7_NOP5 K7_NOP4,K7_NOP1
+#define K7_NOP6 0x8d,0x80,0,0,0,0
+#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0
+#define K7_NOP8 K7_NOP7,K7_NOP1
+#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4
 
 /* P6 nops
    uses eax dependencies (Intel-recommended choice)
@@ -69,52 +78,65 @@
    There is kernel code that depends on this.
 */
 #define P6_NOP1 GENERIC_NOP1
-#define P6_NOP2 ".byte 0x66,0x90\n"
-#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
-#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
-#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
-#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
+#define P6_NOP2 0x66,0x90
+#define P6_NOP3 0x0f,0x1f,0x00
+#define P6_NOP4 0x0f,0x1f,0x40,0
+#define P6_NOP5 0x0f,0x1f,0x44,0x00,0
+#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0
+#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0
+#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
+#define P6_NOP5_ATOMIC P6_NOP5
+
+#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
 
 #if defined(CONFIG_MK7)
-#define ASM_NOP1 K7_NOP1
-#define ASM_NOP2 K7_NOP2
-#define ASM_NOP3 K7_NOP3
-#define ASM_NOP4 K7_NOP4
-#define ASM_NOP5 K7_NOP5
-#define ASM_NOP6 K7_NOP6
-#define ASM_NOP7 K7_NOP7
-#define ASM_NOP8 K7_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
 #elif defined(CONFIG_X86_P6_NOP)
-#define ASM_NOP1 P6_NOP1
-#define ASM_NOP2 P6_NOP2
-#define ASM_NOP3 P6_NOP3
-#define ASM_NOP4 P6_NOP4
-#define ASM_NOP5 P6_NOP5
-#define ASM_NOP6 P6_NOP6
-#define ASM_NOP7 P6_NOP7
-#define ASM_NOP8 P6_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
 #elif defined(CONFIG_X86_64)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
 #else
-#define ASM_NOP1 GENERIC_NOP1
-#define ASM_NOP2 GENERIC_NOP2
-#define ASM_NOP3 GENERIC_NOP3
-#define ASM_NOP4 GENERIC_NOP4
-#define ASM_NOP5 GENERIC_NOP5
-#define ASM_NOP6 GENERIC_NOP6
-#define ASM_NOP7 GENERIC_NOP7
-#define ASM_NOP8 GENERIC_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
 #endif
 
 #define ASM_NOP_MAX 8
+#define NOP_ATOMIC5 (ASM_NOP_MAX+1)	/* Entry for the 5-byte atomic NOP */
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const *ideal_nops;
+extern void arch_init_ideal_nops(void);
+#endif
 
 #endif /* _ASM_X86_NOPS_H */
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -67,17 +67,30 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
 #define DPRINTK(fmt, args...) if (debug_alternative) \
 	printk(KERN_DEBUG fmt, args)
 
+/*
+ * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
+ * that correspond to that nop. Getting from one nop to the next, we
+ * add to the array the offset that is equal to the sum of all sizes of
+ * nops preceding the one we are after.
+ *
+ * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
+ * nice symmetry of sizes of the previous nops.
+ */
 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
-/* Use inline assembly to define this because the nops are defined
-   as inline assembly strings in the include files and we cannot
-   get them easily into strings. */
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
-	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-	GENERIC_NOP7 GENERIC_NOP8
-    "\t.previous");
-extern const unsigned char intelnops[];
-static const unsigned char *const __initconst_or_module
-intel_nops[ASM_NOP_MAX+1] = {
+static const unsigned char intelnops[] =
+{
+	GENERIC_NOP1,
+	GENERIC_NOP2,
+	GENERIC_NOP3,
+	GENERIC_NOP4,
+	GENERIC_NOP5,
+	GENERIC_NOP6,
+	GENERIC_NOP7,
+	GENERIC_NOP8,
+	GENERIC_NOP5_ATOMIC
+};
+static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
+{
 	NULL,
 	intelnops,
 	intelnops + 1,
@@ -87,17 +100,25 @@ intel_nops[ASM_NOP_MAX+1] = {
 	intelnops + 1 + 2 + 3 + 4 + 5,
 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #ifdef K8_NOP1
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
-	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-	K8_NOP7 K8_NOP8
-    "\t.previous");
-extern const unsigned char k8nops[];
-static const unsigned char *const __initconst_or_module
-k8_nops[ASM_NOP_MAX+1] = {
+static const unsigned char k8nops[] =
+{
+	K8_NOP1,
+	K8_NOP2,
+	K8_NOP3,
+	K8_NOP4,
+	K8_NOP5,
+	K8_NOP6,
+	K8_NOP7,
+	K8_NOP8,
+	K8_NOP5_ATOMIC
+};
+static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
+{
 	NULL,
 	k8nops,
 	k8nops + 1,
@@ -107,17 +128,25 @@ k8_nops[ASM_NOP_MAX+1] = {
 	k8nops + 1 + 2 + 3 + 4 + 5,
 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
-	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-	K7_NOP7 K7_NOP8
-    "\t.previous");
-extern const unsigned char k7nops[];
-static const unsigned char *const __initconst_or_module
-k7_nops[ASM_NOP_MAX+1] = {
+static const unsigned char k7nops[] =
+{
+	K7_NOP1,
+	K7_NOP2,
+	K7_NOP3,
+	K7_NOP4,
+	K7_NOP5,
+	K7_NOP6,
+	K7_NOP7,
+	K7_NOP8,
+	K7_NOP5_ATOMIC
+};
+static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
+{
 	NULL,
 	k7nops,
 	k7nops + 1,
@@ -127,17 +156,25 @@ k7_nops[ASM_NOP_MAX+1] = {
 	k7nops + 1 + 2 + 3 + 4 + 5,
 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
 #ifdef P6_NOP1
-asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
-	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
-	P6_NOP7 P6_NOP8
-    "\t.previous");
-extern const unsigned char p6nops[];
-static const unsigned char *const __initconst_or_module
-p6_nops[ASM_NOP_MAX+1] = {
+static const unsigned char __initconst_or_module p6nops[] =
+{
+	P6_NOP1,
+	P6_NOP2,
+	P6_NOP3,
+	P6_NOP4,
+	P6_NOP5,
+	P6_NOP6,
+	P6_NOP7,
+	P6_NOP8,
+	P6_NOP5_ATOMIC
+};
+static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
+{
 	NULL,
 	p6nops,
 	p6nops + 1,
@@ -147,47 +184,53 @@ p6_nops[ASM_NOP_MAX+1] = {
 	p6nops + 1 + 2 + 3 + 4 + 5,
 	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
 };
 #endif
 
+/* Initialize these to a safe default */
 #ifdef CONFIG_X86_64
+const unsigned char * const *ideal_nops = p6_nops;
+#else
+const unsigned char * const *ideal_nops = intel_nops;
+#endif
 
-extern char __vsyscall_0;
-static const unsigned char *const *__init_or_module find_nop_table(void)
+void __init arch_init_ideal_nops(void)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_has(X86_FEATURE_NOPL))
-		return p6_nops;
-	else
-		return k8_nops;
-}
-
-#else /* CONFIG_X86_64 */
-
-static const unsigned char *const *__init_or_module find_nop_table(void)
-{
-	if (boot_cpu_has(X86_FEATURE_K8))
-		return k8_nops;
-	else if (boot_cpu_has(X86_FEATURE_K7))
-		return k7_nops;
-	else if (boot_cpu_has(X86_FEATURE_NOPL))
-		return p6_nops;
-	else
-		return intel_nops;
-}
-
-#endif /* CONFIG_X86_64 */
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		if (boot_cpu_has(X86_FEATURE_NOPL)) {
+			ideal_nops = p6_nops;
+		} else {
+#ifdef CONFIG_X86_64
+			ideal_nops = k8_nops;
+#else
+			ideal_nops = intel_nops;
+#endif
+		}
+
+	default:
+#ifdef CONFIG_X86_64
+		ideal_nops = k8_nops;
+#else
+		if (boot_cpu_has(X86_FEATURE_K8))
+			ideal_nops = k8_nops;
+		else if (boot_cpu_has(X86_FEATURE_K7))
+			ideal_nops = k7_nops;
+		else
+			ideal_nops = intel_nops;
+#endif
+	}
+}
 
 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
 static void __init_or_module add_nops(void *insns, unsigned int len)
 {
-	const unsigned char *const *noptable = find_nop_table();
-
 	while (len > 0) {
 		unsigned int noplen = len;
 		if (noplen > ASM_NOP_MAX)
 			noplen = ASM_NOP_MAX;
-		memcpy(insns, noptable[noplen], noplen);
+		memcpy(insns, ideal_nops[noplen], noplen);
 		insns += noplen;
 		len -= noplen;
 	}
@@ -195,6 +238,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
+extern char __vsyscall_0;
 void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
@@ -678,29 +722,3 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
 	wrote_text = 0;
 	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
-
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-
-#ifdef CONFIG_X86_64
-unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
-#else
-unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
-#endif
-
-void __init arch_init_ideal_nop5(void)
-{
-	/*
-	 * There is no good nop for all x86 archs. This selection
-	 * algorithm should be unified with the one in find_nop_table(),
-	 * but this should be good enough for now.
-	 *
-	 * For cases other than the ones below, use the safe (as in
-	 * always functional) defaults above.
-	 */
-#ifdef CONFIG_X86_64
-	/* Don't use these on 32 bits due to broken virtualizers */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		memcpy(ideal_nop5, p6_nops[5], 5);
-#endif
-}
-#endif
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -260,9 +260,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	return mod_code_status;
 }
 
-static unsigned char *ftrace_nop_replace(void)
+static const unsigned char *ftrace_nop_replace(void)
 {
-	return ideal_nop5;
+	return ideal_nops[NOP_ATOMIC5];
 }
 
 static int
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -34,7 +34,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 		code.offset = entry->target -
 			      (entry->code + JUMP_LABEL_NOP_SIZE);
 	} else
-		memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
 	get_online_cpus();
 	mutex_lock(&text_mutex);
 	text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
@@ -44,7 +44,8 @@ void arch_jump_label_transform(struct jump_entry *entry,
 
 void arch_jump_label_text_poke_early(jump_label_t addr)
 {
-	text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
+			JUMP_LABEL_NOP_SIZE);
 }
 
 #endif
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
-	unsigned long flags;
-
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
@@ -1036,9 +1034,7 @@ void __init setup_arch(char **cmdline_p)
 
 	mcheck_init();
 
-	local_irq_save(flags);
-	arch_init_ideal_nop5();
-	local_irq_restore(flags);
+	arch_init_ideal_nops();
 }
 
 #ifdef CONFIG_X86_32