mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-19 18:24:14 +08:00
ef77e6880b
Use the newly added SYM_CODE_START_LOCAL* to annotate beginnings of all pseudo-functions (those ending with END until now) which do not have ".globl" annotation. This is needed to balance END for tools that generate debuginfo. Note that ENDs are switched to SYM_CODE_END too so that everybody can see the pairing. C-like functions (which handle frame ptr etc.) are not annotated here, hence SYM_CODE_* macros are used here, not SYM_FUNC_*. Note that the 32bit version of early_idt_handler_common already had ENDPROC -- switch that to SYM_CODE_END for the same reason as above (and to be the same as 64bit). While early_idt_handler_common is LOCAL, its name is not prepended with ".L" as it happens to appear in call traces. bad_get_user*, and bad_put_user are now aligned, as they are separate functions. They do not mind to be aligned -- no need to be compact there. early_idt_handler_common is aligned now too, as it is after early_idt_handler_array, so as well no need to be compact there. verify_cpu is self-standing and included in other .S files, so align it too. The others have alignment preserved to what it used to be (using the _NOALIGN variant of macros). Signed-off-by: Jiri Slaby <jslaby@suse.cz> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Alexios Zavras <alexios.zavras@intel.com> Cc: Allison Randal <allison@lohutok.net> Cc: Andy Lutomirski <luto@kernel.org> Cc: Cao jin <caoj.fnst@cn.fujitsu.com> Cc: Enrico Weigelt <info@metux.net> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: linux-arch@vger.kernel.org Cc: Maran Wilson <maran.wilson@oracle.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: x86-ml <x86@kernel.org> Link: https://lkml.kernel.org/r/20191011115108.12392-6-jslaby@suse.cz
141 lines
3.7 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */

/*
 *	verify_cpu.S - Code for cpu long mode and SSE verification. This
 *	code has been borrowed from boot/setup.S and was introduced by
 *	Andi Kleen.
 *
 *	Copyright (c) 2007  Andi Kleen (ak@suse.de)
 *	Copyright (c) 2007  Eric Biederman (ebiederm@xmission.com)
 *	Copyright (c) 2007  Vivek Goyal (vgoyal@in.ibm.com)
 *	Copyright (c) 2010  Kees Cook (kees.cook@canonical.com)
 *
 *	This is a common code for verification whether CPU supports
 *	long mode and SSE or not. It is not called directly instead this
 *	file is included at various places and compiled in that context.
 *	This file is expected to run in 32bit code.  Currently:
 *
 *	arch/x86/boot/compressed/head_64.S: Boot cpu verification
 *	arch/x86/kernel/trampoline_64.S: secondary processor verification
 *	arch/x86/kernel/head_32.S: processor startup
 *
 *	verify_cpu, returns the status of longmode and SSE in register %eax.
 *		0: Success    1: Failure
 *
 *	On Intel, the XD_DISABLE flag will be cleared as a side-effect.
 *
 *	The caller needs to check for the error code and take the action
 *	appropriately. Either display a message or halt.
 */

#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
# verify_cpu - verify that the CPU supports long mode and SSE
#
# Runs in 32-bit code; this file is included (not called cross-file) by
# the .S files listed in the header above, so the symbol is LOCAL.
#
# Out:		%eax = 0 on success (long mode + SSE usable), 1 on failure
# Clobbers:	%eax, %ebx, %ecx, %edx, %di, flags (caller flags are
#		saved on entry and restored before returning)
# Side effects:	may clear XD_DISABLE in IA32_MISC_ENABLE (Intel) and the
#		SSE-disable bit in MSR_K7_HWCR (AMD) -- see below.
SYM_FUNC_START_LOCAL(verify_cpu)
	pushf				# Save caller passed flags
	push	$0			# Kill any dangerous flags
	popf

#ifndef __x86_64__
	# Toggle EFLAGS.ID (bit 21): if the change does not stick, the CPU
	# has no CPUID instruction and therefore no long mode.
	pushfl				# standard way to check for cpuid
	popl	%eax
	movl	%eax,%ebx
	xorl	$0x200000,%eax		# flip the ID bit
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	cmpl	%eax,%ebx
	jz	.Lverify_cpu_no_longmode	# cpu has no cpuid
#endif

	movl	$0x0,%eax		# See if cpuid 1 is implemented
	cpuid
	cmpl	$0x1,%eax
	jb	.Lverify_cpu_no_longmode	# no cpuid 1

	# Vendor detection: CPUID(0) returns the vendor string as
	# little-endian ASCII in %ebx:%edx:%ecx.  %di = 1 marks AMD.
	xor	%di,%di
	cmpl	$0x68747541,%ebx	# AuthenticAMD ("Auth")
	jnz	.Lverify_cpu_noamd
	cmpl	$0x69746e65,%edx	# "enti"
	jnz	.Lverify_cpu_noamd
	cmpl	$0x444d4163,%ecx	# "cAMD"
	jnz	.Lverify_cpu_noamd
	mov	$1,%di			# cpu is from AMD
	jmp	.Lverify_cpu_check

.Lverify_cpu_noamd:
	cmpl	$0x756e6547,%ebx	# GenuineIntel? ("Genu")
	jnz	.Lverify_cpu_check
	cmpl	$0x49656e69,%edx	# "ineI"
	jnz	.Lverify_cpu_check
	cmpl	$0x6c65746e,%ecx	# "ntel"
	jnz	.Lverify_cpu_check

	# only call IA32_MISC_ENABLE when:
	# family > 6 || (family == 6 && model >= 0xd)
	movl	$0x1, %eax		# check CPU family and model
	cpuid
	movl	%eax, %ecx		# keep a copy for the model check below

	andl	$0x0ff00f00, %eax	# mask family and extended family
	shrl	$8, %eax
	cmpl	$6, %eax
	ja	.Lverify_cpu_clear_xd	# family > 6, ok
	jb	.Lverify_cpu_check	# family < 6, skip

	andl	$0x000f00f0, %ecx	# mask model and extended model
	shrl	$4, %ecx
	cmpl	$0xd, %ecx
	jb	.Lverify_cpu_check	# family == 6, model < 0xd, skip

.Lverify_cpu_clear_xd:
	movl	$MSR_IA32_MISC_ENABLE, %ecx
	rdmsr
	btrl	$2, %edx		# clear MSR_IA32_MISC_ENABLE_XD_DISABLE
	jnc	.Lverify_cpu_check	# only write MSR if bit was changed
	wrmsr

.Lverify_cpu_check:
	movl	$0x1,%eax		# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK0,%edx	# fail unless all REQUIRED_MASK0
	xorl	$REQUIRED_MASK0,%edx	# feature bits are set
	jnz	.Lverify_cpu_no_longmode

	movl	$0x80000000,%eax	# See if extended cpuid is implemented
	cpuid
	cmpl	$0x80000001,%eax
	jb	.Lverify_cpu_no_longmode	# no extended cpuid

	movl	$0x80000001,%eax	# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK1,%edx	# fail unless all REQUIRED_MASK1
	xorl	$REQUIRED_MASK1,%edx	# feature bits are set
	jnz	.Lverify_cpu_no_longmode

	# SSE check; on AMD (%di == 1) one attempt is made to re-enable
	# SSE via MSR_K7_HWCR before giving up.
.Lverify_cpu_sse_test:
	movl	$1,%eax
	cpuid
	andl	$SSE_MASK,%edx
	cmpl	$SSE_MASK,%edx
	je	.Lverify_cpu_sse_ok
	test	%di,%di
	jz	.Lverify_cpu_no_longmode	# only try to force SSE on AMD
	movl	$MSR_K7_HWCR,%ecx
	rdmsr
	btr	$15,%eax		# enable SSE
	wrmsr
	xor	%di,%di			# don't loop
	jmp	.Lverify_cpu_sse_test	# try again

.Lverify_cpu_no_longmode:
	popf				# Restore caller passed flags
	movl	$1,%eax			# return 1: failure
	ret
.Lverify_cpu_sse_ok:
	popf				# Restore caller passed flags
	xorl	%eax, %eax		# return 0: success
	ret
SYM_FUNC_END(verify_cpu)