Memory Encryption Support Common Code (arch/x86 mem_encrypt.c) — two kernel
versions of the same file are shown below.

Version: v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Memory Encryption Support Common Code
  4 *
  5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
  6 *
  7 * Author: Tom Lendacky <thomas.lendacky@amd.com>
  8 */
  9
 10#include <linux/dma-direct.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/swiotlb.h>
 13#include <linux/cc_platform.h>
 14#include <linux/mem_encrypt.h>
 15#include <linux/virtio_anchor.h>
 16
 17#include <asm/sev.h>
 18
 19/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 20bool force_dma_unencrypted(struct device *dev)
 21{
 22	/*
 23	 * For SEV, all DMA must be to unencrypted addresses.
 24	 */
 25	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 26		return true;
 27
 28	/*
 29	 * For SME, all DMA must be to unencrypted addresses if the
 30	 * device does not support DMA to addresses that include the
 31	 * encryption mask.
 32	 */
 33	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 34		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
 35		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
 36						dev->bus_dma_limit);
 37
 38		if (dma_dev_mask <= dma_enc_mask)
 39			return true;
 40	}
 41
 42	return false;
 43}
 44
 45static void print_mem_encrypt_feature_info(void)
 46{
 47	pr_info("Memory Encryption Features active: ");
 
 
 
 
 
 48
 49	switch (cc_vendor) {
 50	case CC_VENDOR_INTEL:
 51		pr_cont("Intel TDX\n");
 52		break;
 53	case CC_VENDOR_AMD:
 54		pr_cont("AMD");
 55
 56		/* Secure Memory Encryption */
 57		if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 58		/*
 59		 * SME is mutually exclusive with any of the SEV
 60		 * features below.
 61		*/
 62			pr_cont(" SME\n");
 63			return;
 64		}
 65
 66		/* Secure Encrypted Virtualization */
 67		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 68			pr_cont(" SEV");
 69
 70		/* Encrypted Register State */
 71		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 72			pr_cont(" SEV-ES");
 73
 74		/* Secure Nested Paging */
 75		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 76			pr_cont(" SEV-SNP");
 77
 78		pr_cont("\n");
 79
 80		sev_show_status();
 81
 82		break;
 83	default:
 84		pr_cont("Unknown\n");
 85	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 86}
 87
 88/* Architecture __weak replacement functions */
 89void __init mem_encrypt_init(void)
 90{
 91	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 92		return;
 93
 94	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
 95	swiotlb_update_mem_attributes();
 96
 97	print_mem_encrypt_feature_info();
 98}
 99
100void __init mem_encrypt_setup_arch(void)
101{
102	phys_addr_t total_mem = memblock_phys_mem_size();
103	unsigned long size;
104
105	/*
106	 * Do RMP table fixups after the e820 tables have been setup by
107	 * e820__memory_setup().
108	 */
109	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
110		snp_fixup_e820_tables();
111
112	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
113		return;
114
115	/*
116	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
117	 * Kernel uses SWIOTLB to make this happen without changing device
118	 * drivers. However, depending on the workload being run, the
119	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
120	 * run out of buffers for DMA, resulting in I/O errors and/or
121	 * performance degradation especially with high I/O workloads.
122	 *
123	 * Adjust the default size of SWIOTLB using a percentage of guest
124	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
125	 * memory is allocated from low memory, ensure that the adjusted size
126	 * is within the limits of low available memory.
127	 *
128	 * The percentage of guest memory used here for SWIOTLB buffers
129	 * is more of an approximation of the static adjustment which
130	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
131	 */
132	size = total_mem * 6 / 100;
133	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
134	swiotlb_adjust_size(size);
135
136	/* Set restricted memory access for virtio. */
137	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
138}
Version: v6.8 (older revision of the same file)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Memory Encryption Support Common Code
  4 *
  5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
  6 *
  7 * Author: Tom Lendacky <thomas.lendacky@amd.com>
  8 */
  9
 10#include <linux/dma-direct.h>
 11#include <linux/dma-mapping.h>
 12#include <linux/swiotlb.h>
 13#include <linux/cc_platform.h>
 14#include <linux/mem_encrypt.h>
 15#include <linux/virtio_anchor.h>
 16
 
 
 17/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 18bool force_dma_unencrypted(struct device *dev)
 19{
 20	/*
 21	 * For SEV, all DMA must be to unencrypted addresses.
 22	 */
 23	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 24		return true;
 25
 26	/*
 27	 * For SME, all DMA must be to unencrypted addresses if the
 28	 * device does not support DMA to addresses that include the
 29	 * encryption mask.
 30	 */
 31	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 32		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
 33		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
 34						dev->bus_dma_limit);
 35
 36		if (dma_dev_mask <= dma_enc_mask)
 37			return true;
 38	}
 39
 40	return false;
 41}
 42
 43static void print_mem_encrypt_feature_info(void)
 44{
 45	pr_info("Memory Encryption Features active:");
 46
 47	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
 48		pr_cont(" Intel TDX\n");
 49		return;
 50	}
 51
 52	pr_cont(" AMD");
 
 
 
 
 
 53
 54	/* Secure Memory Encryption */
 55	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 56		/*
 57		 * SME is mutually exclusive with any of the SEV
 58		 * features below.
 59		 */
 60		pr_cont(" SME\n");
 61		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 62	}
 63
 64	/* Secure Encrypted Virtualization */
 65	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 66		pr_cont(" SEV");
 67
 68	/* Encrypted Register State */
 69	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 70		pr_cont(" SEV-ES");
 71
 72	/* Secure Nested Paging */
 73	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 74		pr_cont(" SEV-SNP");
 75
 76	pr_cont("\n");
 77}
 78
 79/* Architecture __weak replacement functions */
 80void __init mem_encrypt_init(void)
 81{
 82	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 83		return;
 84
 85	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
 86	swiotlb_update_mem_attributes();
 87
 88	print_mem_encrypt_feature_info();
 89}
 90
 91void __init mem_encrypt_setup_arch(void)
 92{
 93	phys_addr_t total_mem = memblock_phys_mem_size();
 94	unsigned long size;
 
 
 
 
 
 
 
 95
 96	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 97		return;
 98
 99	/*
100	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
101	 * Kernel uses SWIOTLB to make this happen without changing device
102	 * drivers. However, depending on the workload being run, the
103	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
104	 * run out of buffers for DMA, resulting in I/O errors and/or
105	 * performance degradation especially with high I/O workloads.
106	 *
107	 * Adjust the default size of SWIOTLB using a percentage of guest
108	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
109	 * memory is allocated from low memory, ensure that the adjusted size
110	 * is within the limits of low available memory.
111	 *
112	 * The percentage of guest memory used here for SWIOTLB buffers
113	 * is more of an approximation of the static adjustment which
114	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
115	 */
116	size = total_mem * 6 / 100;
117	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
118	swiotlb_adjust_size(size);
119
120	/* Set restricted memory access for virtio. */
121	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
122}