07344b15a9
In arch/x86/boot/compressed/kaslr_64.c, CONFIG_AMD_MEM_ENCRYPT support was initially #undef'd to support SME with minimal effort. When support for SEV was added, the #undef remained and some minimal support for setting the encryption bit was added for building identity mapped pagetable entries.

Commit b83ce5ee91 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52") changed __PHYSICAL_MASK_SHIFT from 46 to 52 in support of 5-level paging. This change resulted in SEV guests failing to boot because the encryption bit was no longer being automatically masked out. The compressed boot path now requires sme_me_mask to be defined in order for the pagetable functions, such as pud_present(), to properly mask out the encryption bit (currently bit 47) when evaluating pagetable entries.

Add an sme_me_mask variable in arch/x86/boot/compressed/mem_encrypt.S, which is set when SEV is active, delete the #undef CONFIG_AMD_MEM_ENCRYPT from arch/x86/boot/compressed/kaslr_64.c and use sme_me_mask when building the identity mapped pagetable entries.

Fixes: b83ce5ee91 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52")
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lkml.kernel.org/r/20180327220711.8702.55842.stgit@tlendack-t1.amdoffice.net
124 lines
2.4 KiB
ArmAsm
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>

#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>

	.text
	.code32
ENTRY(get_sev_encryption_bit)
	xor	%eax, %eax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%ebx
	push	%ecx
	push	%edx
	push	%edi

	/*
	 * RIP-relative addressing is needed to access the encryption bit
	 * variable. Since we are running in 32-bit mode we need this call/pop
	 * sequence to get the proper relative addressing.
	 */
	call	1f
1:	popl	%edi
	subl	$1b, %edi

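	/* enc_bit starts at -1; a non-negative value is a cached result */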
	movl	enc_bit(%edi), %eax
	cmpl	$0, %eax
	jge	.Lsev_exit

	/* Check if running under a hypervisor */
	movl	$1, %eax
	cpuid
	bt	$31, %ecx		/* Check the hypervisor bit */
	jnc	.Lno_sev

	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
	jb	.Lno_sev

	/*
	 * Check for the SEV feature:
	 *   CPUID Fn8000_001F[EAX] - Bit 1
	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
	 *     Pagetable bit position used to indicate encryption
	 */
	movl	$0x8000001f, %eax
	cpuid
	bt	$1, %eax		/* Check if SEV is available */
	jnc	.Lno_sev

	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
	rdmsr
	bt	$MSR_AMD64_SEV_ENABLED_BIT, %eax	/* Check if SEV is active */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Return the encryption bit location */
	movl	%eax, enc_bit(%edi)
	jmp	.Lsev_exit

.Lno_sev:
	xor	%eax, %eax
	movl	%eax, enc_bit(%edi)

.Lsev_exit:
	pop	%edi
	pop	%edx
	pop	%ecx
	pop	%ebx

#endif	/* CONFIG_AMD_MEM_ENCRYPT */

	ret
ENDPROC(get_sev_encryption_bit)

	.code64
ENTRY(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	push	%rbp
	push	%rdx

	movq	%rsp, %rbp		/* Save current stack pointer */

	call	get_sev_encryption_bit	/* Get the encryption bit position */
	testl	%eax, %eax
	jz	.Lno_sev_mask

	bts	%rax, sme_me_mask(%rip)	/* Create the encryption mask */

.Lno_sev_mask:
	movq	%rbp, %rsp		/* Restore original stack pointer */

	pop	%rdx
	pop	%rbp
#endif

	xor	%rax, %rax
	ret
ENDPROC(set_sev_encryption_mask)

	.data
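/* Cached encryption bit position: -1 = not yet determined, 0 = SEV not active */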
enc_bit:
	.int	0xffffffff

#ifdef CONFIG_AMD_MEM_ENCRYPT
	.balign	8
GLOBAL(sme_me_mask)
	.quad	0
#endif
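For reference, the CPUID/MSR probe performed by get_sev_encryption_bit above corresponds roughly to the following C sketch (illustrative only; sev_encryption_bit() and the rdmsr() helper are not kernel code, and RDMSR is a privileged instruction, so this is only meaningful in a boot or kernel context):

#include <stdint.h>
#include <cpuid.h>

#define MSR_AMD64_SEV		0xc0010131
#define MSR_AMD64_SEV_ENABLED	(1ULL << 0)

/* Privileged: RDMSR faults outside ring 0. */
static uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

/* Return the pagetable C-bit position when SEV is active, 0 otherwise. */
static unsigned int sev_encryption_bit(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Must be running under a hypervisor: CPUID leaf 1, ECX bit 31. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 31)))
		return 0;

	/* Leaf 0x8000001f must exist and report SEV support in EAX bit 1. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx) || !(eax & (1u << 1)))
		return 0;

	/* SEV must actually be enabled for this guest. */
	if (!(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED))
		return 0;

	/* CPUID Fn8000_001F[EBX], bits 5:0: the C-bit position. */
	return ebx & 0x3f;
}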