
treewide: Replace the use of mem_encrypt_active() with cc_platform_has()

Replace uses of mem_encrypt_active() with calls to cc_platform_has() passing
the CC_ATTR_MEM_ENCRYPT attribute.
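
The substitution is mechanical; as a minimal sketch of the pattern (both
lines are taken verbatim from the hunks below, the surrounding function is
elided):

	/* before */
	if (!mem_encrypt_active())
		return 0;

	/* after */
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return 0;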

Remove the implementation of mem_encrypt_active() across all arches.

For s390, since the default implementation of cc_platform_has() matches the
s390 implementation of mem_encrypt_active(), cc_platform_has() does not need
to be implemented for s390 (the config option ARCH_HAS_CC_PLATFORM is not
set).
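
To illustrate why no s390 implementation is needed: a sketch of the generic
fallback, assuming <linux/cc_platform.h> follows the usual kernel stub
pattern (that header is not part of this diff, so its exact shape here is an
assumption):

	#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	bool cc_platform_has(enum cc_attr attr);
	#else
	/*
	 * Without ARCH_HAS_CC_PLATFORM every attribute reads as "not
	 * present", which matches s390's old mem_encrypt_active() stub
	 * returning false.
	 */
	static inline bool cc_platform_has(enum cc_attr attr)
	{
		return false;
	}
	#endif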

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210928191009.32551-9-bp@alien8.de
Authored by Tom Lendacky on 2021-09-08 17:58:39 -05:00, committed by Borislav Petkov
parent 6283f2effb
commit e9d1d2bb75
18 changed files with 36 additions and 40 deletions

arch/powerpc/include/asm/mem_encrypt.h

@@ -10,11 +10,6 @@
 
 #include <asm/svm.h>
 
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();

arch/powerpc/platforms/pseries/svm.c

@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/memblock.h>
+#include <linux/cc_platform.h>
 #include <asm/machdep.h>
 #include <asm/svm.h>
 #include <asm/swiotlb.h>
@@ -63,7 +64,7 @@ void __init svm_swiotlb_init(void)
 
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))
@@ -76,7 +77,7 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	if (!PAGE_ALIGNED(addr))

arch/s390/include/asm/mem_encrypt.h

@@ -4,8 +4,6 @@
 #ifndef __ASSEMBLY__
 
-static inline bool mem_encrypt_active(void) { return false; }
-
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);

arch/x86/include/asm/mem_encrypt.h

@@ -96,11 +96,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
 static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;

arch/x86/kernel/head64.c

@@ -19,7 +19,7 @@
 #include <linux/start_kernel.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/processor.h>
@@ -284,8 +284,13 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 * The bss section will be memset to zero later in the initialization so
 	 * there is no need to zero it after changing the memory encryption
 	 * attribute.
+	 *
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
 	 */
-	if (mem_encrypt_active()) {
+	if (sme_get_me_mask()) {
 		vaddr = (unsigned long)__start_bss_decrypted;
 		vaddr_end = (unsigned long)__end_bss_decrypted;
 		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {

arch/x86/mm/ioremap.c

@@ -694,7 +694,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 				 unsigned long flags)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	if (flags & MEMREMAP_ENC)
@@ -724,7 +724,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 {
 	bool encrypted_prot;
 
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return prot;
 
 	encrypted_prot = true;

arch/x86/mm/mem_encrypt.c

@@ -400,7 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
 	 * The unused memory range was mapped decrypted, change the encryption
 	 * attribute from decrypted to encrypted before freeing it.
 	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		r = set_memory_encrypted(vaddr, npages);
 		if (r) {
 			pr_warn("failed to free unused decrypted pages\n");

arch/x86/mm/pat/set_memory.c

@@ -18,6 +18,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
+#include <linux/cc_platform.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1986,7 +1987,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	/* Should not be working on unaligned addresses */

drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -38,6 +38,7 @@
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
 #include <linux/suspend.h>
+#include <linux/cc_platform.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -1269,7 +1270,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	 * however, SME requires an indirect IOMMU mapping because the encryption
 	 * bit is beyond the DMA mask of the chip.
 	 */
-	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
 		dev_info(&pdev->dev,
 			 "SME is not compatible with RAVEN\n");
 		return -ENOTSUPP;

drivers/gpu/drm/drm_cache.c

@@ -31,7 +31,7 @@
 #include <linux/dma-buf-map.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <xen/xen.h>
 
 #include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
 	 * Enforce dma_alloc_coherent when memory encryption is active as well
 	 * for the same reasons as for Xen paravirtual hosts.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -29,7 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
 	/* TTM currently doesn't fully support SEV encryption. */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -EINVAL;
 
 	if (vmw_force_coherent)

drivers/gpu/drm/vmwgfx/vmwgfx_msg.c

@@ -28,7 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/hypervisor.h>
 #include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 	unsigned long msg_len = strlen(msg);
 
 	/* HB port can't access encrypted memory. */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_high;
 		u32 channel_id = (channel->channel_id << 16);
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 	unsigned long si, di, eax, ebx, ecx, edx;
 
 	/* HB port can't access encrypted memory */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_low;
 		u32 channel_id = (channel->channel_id << 16);

drivers/iommu/amd/iommu.c

@@ -31,6 +31,7 @@
 #include <linux/irqdomain.h>
 #include <linux/percpu.h>
 #include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
 	 * active, because some of those devices (AMD GPUs) don't have the
 	 * encryption bit in their DMA-mask and require remapping.
 	 */
-	if (!mem_encrypt_active() && dev_data->iommu_v2)
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
 		return IOMMU_DOMAIN_IDENTITY;
 
 	return 0;

drivers/iommu/amd/iommu_v2.c

@@ -17,6 +17,7 @@
 #include <linux/wait.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/cc_platform.h>
 
 #include "amd_iommu.h"
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	 * When memory encryption is active the device is likely not in a
 	 * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
 	 */
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return -ENODEV;
 
 	if (!amd_iommu_v2_supported())

drivers/iommu/iommu.c

@@ -25,6 +25,7 @@
 #include <linux/property.h>
 #include <linux/fsl/mc.h>
 #include <linux/module.h>
+#include <linux/cc_platform.h>
 #include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
 		else
 			iommu_set_default_translated(false);
 
-		if (iommu_default_passthrough() && mem_encrypt_active()) {
+		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
 			iommu_set_default_translated(false);
 		}

fs/proc/vmcore.c

@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -177,7 +177,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
  */
 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 {
-	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
+	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 }
 
 /*
@@ -378,7 +378,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 					    buflen);
 			start = m->paddr + *fpos - m->offset;
 			tmp = read_from_oldmem(buffer, tsz, &start,
-					       userbuf, mem_encrypt_active());
+					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 			if (tmp < 0)
 				return tmp;
 			buflen -= tsz;

include/linux/mem_encrypt.h

@@ -16,10 +16,6 @@
 
 #include <asm/mem_encrypt.h>
 
-#else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT

kernel/dma/swiotlb.c

@@ -34,7 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/set_memory.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
@@ -552,7 +552,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	if (!mem)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (mem_encrypt_active())
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
 	if (mapping_size > alloc_size) {