// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>

#include <asm/sev.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return true;

        /*
         * For SME, all DMA must be to unencrypted addresses if the
         * device does not support DMA to addresses that include the
         * encryption mask.
         */
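        /*
         * Illustrative example (assumed C-bit position, not taken from this
         * file): if the SME encryption bit were bit 47, sme_me_mask would be
         * 1ULL << 47, __ffs64() would return 47, and dma_enc_mask would be
         * DMA_BIT_MASK(47).  A device limited to 32-bit DMA
         * (dma_dev_mask == DMA_BIT_MASK(32)) can never address memory with
         * the encryption bit set, so its DMA is forced to use unencrypted
         * (bounce) buffers.
         */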
        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
                u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
                                                dev->bus_dma_limit);

                if (dma_dev_mask <= dma_enc_mask)
                        return true;
        }

        return false;
}

static void print_mem_encrypt_feature_info(void)
{
        pr_info("Memory Encryption Features active: ");

        switch (cc_vendor) {
        case CC_VENDOR_INTEL:
                pr_cont("Intel TDX\n");
                break;
        case CC_VENDOR_AMD:
                pr_cont("AMD");

                /* Secure Memory Encryption */
                if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                        /*
                         * SME is mutually exclusive with any of the SEV
                         * features below.
                         */
                        pr_cont(" SME\n");
                        return;
                }

                /* Secure Encrypted Virtualization */
                if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                        pr_cont(" SEV");

                /* Encrypted Register State */
                if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                        pr_cont(" SEV-ES");

                /* Secure Nested Paging */
                if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
                        pr_cont(" SEV-SNP");

                pr_cont("\n");

                sev_show_status();

                break;
        default:
                pr_cont("Unknown\n");
        }
}

/* Architecture __weak replacement functions */
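/*
 * Note (generic kernel behavior, not shown in this file): common init code
 * provides empty __weak default stubs, roughly of the form
 *
 *      void __init __weak mem_encrypt_init(void) { }
 *
 * so the strong definitions below transparently replace the no-op defaults
 * when this architecture's memory encryption support is built in.
 */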
void __init mem_encrypt_init(void)
{
        if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return;

        /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
        swiotlb_update_mem_attributes();

        print_mem_encrypt_feature_info();
}

void __init mem_encrypt_setup_arch(void)
{
        phys_addr_t total_mem = memblock_phys_mem_size();
        unsigned long size;

        /*
         * Do RMP table fixups after the e820 tables have been set up by
         * e820__memory_setup().
         */
        if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
                snp_fixup_e820_tables();

        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

115 /*
116 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
117 * Kernel uses SWIOTLB to make this happen without changing device
118 * drivers. However, depending on the workload being run, the
119 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
120 * run out of buffers for DMA, resulting in I/O errors and/or
121 * performance degradation especially with high I/O workloads.
122 *
123 * Adjust the default size of SWIOTLB using a percentage of guest
124 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
125 * memory is allocated from low memory, ensure that the adjusted size
126 * is within the limits of low available memory.
127 *
128 * The percentage of guest memory used here for SWIOTLB buffers
129 * is more of an approximation of the static adjustment which
130 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
131 */
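        /*
         * Worked numbers (illustrative only, assuming IO_TLB_DEFAULT_SIZE is
         * 64MB): a 1GB guest computes ~61MB and is clamped up to 64MB, a 4GB
         * guest gets ~246MB, and guests above roughly 16GB are capped at the
         * SZ_1G upper bound.
         */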
        size = total_mem * 6 / 100;
        size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
        swiotlb_adjust_size(size);

        /* Set restricted memory access for virtio. */
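        /*
         * Background (assumption based on virtio core behavior, not stated
         * here): with guest memory encryption active the host cannot freely
         * access guest buffers, so virtio devices are required to negotiate
         * VIRTIO_F_ACCESS_PLATFORM and use the DMA API, which routes their
         * transfers through the unencrypted SWIOTLB bounce buffers sized
         * above.
         */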
        virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}