// SPDX-License-Identifier: GPL-2.0-only
/*
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/export.h>
#include <linux/cc_platform.h>

#include <asm/coco.h>
#include <asm/processor.h>

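/*
 * Vendor of the active confidential-computing platform and the mask of the
 * page-table bit that conveys a page's encryption state (e.g. the AMD C-bit).
 * Both are set once during early boot and are read-only afterwards.
 */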
static enum cc_vendor vendor __ro_after_init;
static u64 cc_mask __ro_after_init;

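/*
 * CC_VENDOR_INTEL is currently only set for Intel TDX guests, so these are
 * the attributes a TDX guest reports.
 */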
static bool intel_cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
	case CC_ATTR_HOTPLUG_DISABLED:
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this. When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
static bool amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return sme_me_mask;

	case CC_ATTR_HOST_MEM_ENCRYPT:
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);

	case CC_ATTR_GUEST_MEM_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ENABLED;

	case CC_ATTR_GUEST_STATE_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;

	/*
	 * With SEV, the rep string I/O instructions need to be unrolled
	 * but SEV-ES supports them through the #VC handler.
	 */
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
		return (sev_status & MSR_AMD64_SEV_ENABLED) &&
			!(sev_status & MSR_AMD64_SEV_ES_ENABLED);

	case CC_ATTR_GUEST_SEV_SNP:
		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;

	default:
		return false;
	}
#else
	return false;
#endif
}

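/*
 * CC_VENDOR_HYPERV covers Hyper-V isolation VMs (vTOM-based), which only
 * report guest memory encryption through this interface.
 */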
static bool hyperv_cc_platform_has(enum cc_attr attr)
{
	return attr == CC_ATTR_GUEST_MEM_ENCRYPT;
}

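/*
 * Dispatch the capability query to the vendor-specific helper selected at
 * boot via cc_set_vendor().  Exported so that modules can query the
 * platform as well.
 */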
bool cc_platform_has(enum cc_attr attr)
{
	switch (vendor) {
	case CC_VENDOR_AMD:
		return amd_cc_platform_has(attr);
	case CC_VENDOR_INTEL:
		return intel_cc_platform_has(attr);
	case CC_VENDOR_HYPERV:
		return hyperv_cc_platform_has(attr);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(cc_platform_has);

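/*
 * cc_mkenc()/cc_mkdec() are used by pgprot_encrypted()/pgprot_decrypted(),
 * among others, to mark a page-protection value as encrypted or decrypted
 * in whatever way the active vendor encodes it in the page tables.
 */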
u64 cc_mkenc(u64 val)
{
	/*
	 * Both AMD and Intel use a bit in the page table to indicate
	 * encryption status of the page.
	 *
	 * - for AMD, bit *set* means the page is encrypted
	 * - for Intel *clear* means encrypted.
	 */
	switch (vendor) {
	case CC_VENDOR_AMD:
		return val | cc_mask;
	case CC_VENDOR_INTEL:
		return val & ~cc_mask;
	default:
		return val;
	}
}

u64 cc_mkdec(u64 val)
{
	/* See comment in cc_mkenc() */
	switch (vendor) {
	case CC_VENDOR_AMD:
		return val & ~cc_mask;
	case CC_VENDOR_INTEL:
		return val | cc_mask;
	default:
		return val;
	}
}
EXPORT_SYMBOL_GPL(cc_mkdec);

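/*
 * Called once during early boot (e.g. from the SME/SEV or TDX setup code)
 * to record the active vendor and the page-table encryption mask.
 */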
__init void cc_set_vendor(enum cc_vendor v)
{
	vendor = v;
}

__init void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}
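
/*
 * Below is a later revision of the same file: cc_vendor is now a global
 * assigned directly by the architecture code, the helpers are marked
 * noinstr, SEV-SNP vTOM guests are handled explicitly, and the separate
 * Hyper-V vendor is gone.
 */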
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/export.h>
#include <linux/cc_platform.h>

#include <asm/coco.h>
#include <asm/processor.h>

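/*
 * The active vendor is no longer a static variable behind cc_set_vendor();
 * it is exposed (declared in <asm/coco.h>) so other code can read and
 * assign it directly.
 */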
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
static u64 cc_mask __ro_after_init;

static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
	case CC_ATTR_HOTPLUG_DISABLED:
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}

/*
 * Handle the SEV-SNP vTOM case where sme_me_mask is zero, and
 * the other levels of SME/SEV functionality, including C-bit
 * based SEV-SNP, are not enabled.
 */
static __maybe_unused __always_inline bool amd_cc_platform_vtom(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;
	default:
		return false;
	}
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * cc_platform_has() function is used for this. When a distinction isn't
 * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */

static bool noinstr amd_cc_platform_has(enum cc_attr attr)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT

	if (sev_status & MSR_AMD64_SNP_VTOM)
		return amd_cc_platform_vtom(attr);

	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		return sme_me_mask;

	case CC_ATTR_HOST_MEM_ENCRYPT:
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);

	case CC_ATTR_GUEST_MEM_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ENABLED;

	case CC_ATTR_GUEST_STATE_ENCRYPT:
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;

	/*
	 * With SEV, the rep string I/O instructions need to be unrolled
	 * but SEV-ES supports them through the #VC handler.
	 */
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
		return (sev_status & MSR_AMD64_SEV_ENABLED) &&
			!(sev_status & MSR_AMD64_SEV_ES_ENABLED);

	case CC_ATTR_GUEST_SEV_SNP:
		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;

	default:
		return false;
	}
#else
	return false;
#endif
}

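/*
 * noinstr (here and on the vendor helpers above) allows these checks to be
 * made from code that must not be instrumented, such as early entry and
 * #VC handling paths.
 */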
bool noinstr cc_platform_has(enum cc_attr attr)
{
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		return amd_cc_platform_has(attr);
	case CC_VENDOR_INTEL:
		return intel_cc_platform_has(attr);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(cc_platform_has);

u64 cc_mkenc(u64 val)
{
	/*
	 * Both AMD and Intel use a bit in the page table to indicate
	 * encryption status of the page.
	 *
	 * - for AMD, bit *set* means the page is encrypted
	 * - for AMD with vTOM and for Intel, *clear* means encrypted
	 */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val & ~cc_mask;
		else
			return val | cc_mask;
	case CC_VENDOR_INTEL:
		return val & ~cc_mask;
	default:
		return val;
	}
}

u64 cc_mkdec(u64 val)
{
	/* See comment in cc_mkenc() */
	switch (cc_vendor) {
	case CC_VENDOR_AMD:
		if (sev_status & MSR_AMD64_SNP_VTOM)
			return val | cc_mask;
		else
			return val & ~cc_mask;
	case CC_VENDOR_INTEL:
		return val | cc_mask;
	default:
		return val;
	}
}
EXPORT_SYMBOL_GPL(cc_mkdec);

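/*
 * Only the mask retains a setter in this revision; cc_vendor is assigned
 * directly by the architecture setup code.
 */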
__init void cc_set_mask(u64 mask)
{
	cc_mask = mask;
}