Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-15 23:14:31 +08:00)
c7753208a9
Since DMA addresses will effectively look like 48-bit addresses when the
memory encryption mask is set, SWIOTLB is needed if the DMA mask of the
device performing the DMA does not support 48 bits. SWIOTLB will be
initialized to create decrypted bounce buffers for use by these devices.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Toshimitsu Kani <toshi.kani@hpe.com>
Cc: kasan-dev@googlegroups.com
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/aa2d29b78ae7d508db8881e46a3215231b9327a7.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
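To make the commit's premise concrete: with SME active, the encryption mask (the "C-bit") is OR'd into physical addresses, so any DMA address handed to a device carries it, and a device whose DMA mask is narrower than 48 bits can no longer reach its buffers directly. Below is a minimal standalone sketch of that check. It assumes the C-bit sits at bit 47; real hardware reports the position via CPUID 0x8000001F, and the helper names here are illustrative, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative C-bit position; real kernels read it from CPUID 0x8000001F. */
#define SME_ME_MASK	(1ULL << 47)

/* An encrypted DMA address is the physical address with the C-bit set. */
static uint64_t sme_dma_addr(uint64_t paddr)
{
	return paddr | SME_ME_MASK;
}

/* true if the device cannot reach the encrypted address, i.e. must bounce */
static bool needs_swiotlb_bounce(uint64_t paddr, uint64_t dev_dma_mask)
{
	return sme_dma_addr(paddr) > dev_dma_mask;
}

int main(void)
{
	uint64_t paddr = 0x1000000;	/* 16 MiB: reachable on its own */

	/* 32-bit device: the C-bit pushes the address out of reach */
	printf("32-bit mask: bounce=%d\n",
	       needs_swiotlb_bounce(paddr, 0xffffffffULL));

	/* 64-bit device: can address the C-bit directly, no bounce */
	printf("64-bit mask: bounce=%d\n",
	       needs_swiotlb_bounce(paddr, ~0ULL));

	return 0;
}

This is why SWIOTLB's bounce buffers are made decrypted in this patch: devices that must bounce get buffers the hardware does not encrypt, so no C-bit is needed in the addresses they see.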
209 lines
5.2 KiB
C
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
unsigned long sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

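	/*
	 * Flush stale TLB entries and write back/invalidate the caches:
	 * the same physical pages are accessed below through both
	 * encrypted and decrypted mappings, and cached data from one
	 * view must not leak into the other.
	 */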
	local_flush_tlb();
	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at
	 * most) one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

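/*
 * Map or unmap @size bytes at @vaddr using early 2MB page table entries
 * with the encryption mask cleared. The boot loader wrote this data to
 * memory unencrypted, so it must not be accessed through encrypted
 * mappings.
 */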
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	__native_flush_tlb();
}

void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}

/* Architecture __weak replacement functions */
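/*
 * mem_encrypt_init() overrides the __weak hook of the same name invoked
 * from start_kernel(); by then SWIOTLB has allocated its bounce buffers,
 * so they can be marked decrypted here.
 */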
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();
}

void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
	WARN(PAGE_ALIGN(size) != size,
	     "size is not page-aligned (%#lx)\n", size);

	/* Make the SWIOTLB buffer area decrypted */
	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}

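/*
 * Empty stubs for now: in-place encryption of the kernel image and SME
 * enablement are expected to be filled in by later patches in the SME
 * series.
 */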
void __init sme_encrypt_kernel(void)
{
}

void __init sme_enable(void)
{
}