// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run on
 * bare metal anyway, we just keep it from happening. (This list needs to
 * be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/coco.h>
#include <asm/sev.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)

struct sme_populate_pgd_data {
	void	*pgtable_area;
	pgd_t	*pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them. This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
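
/*
 * For illustration: with 4KB base pages (the usual x86-64 configuration),
 * PMD_SIZE is 2MB, so this work area is 4MB. The executable area (stack
 * page, encryption routine page and the 2MB intermediate copy buffer)
 * takes a bit over 2MB of it; the remainder is more than enough for the
 * pagetable pages sized by sme_pgtable_calc() below.
 */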

static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}

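/*
 * Walk - and, where missing, allocate from ppd->pgtable_area - the
 * PGD/P4D/PUD levels covering ppd->vaddr, returning the PUD so the
 * callers below can install PMD- or PTE-level mappings. Returns NULL
 * if the address is already covered by a leaf (large-page) PUD.
 */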
static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_leaf(*pud))
		return NULL;

	return pud;
}

static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_leaf(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_leaf(*pmd))
		return;

	pte = pte_offset_kernel(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_SIZE;
		ppd->paddr += PMD_SIZE;
	}
}

static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
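
/*
 * Worked example with hypothetical addresses: mapping [0x1ff000, 0x603000)
 * is built as 4KB PTEs for [0x1ff000, 0x200000), 2MB PMDs for
 * [0x200000, 0x600000), and 4KB PTEs again for [0x600000, 0x603000).
 * Ranges that are already 2MB aligned use PMD entries only.
 */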

static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

static unsigned long __head sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings. For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portions of the address
	 * range that fall outside of the 2MB alignment. This results in,
	 * at most, two extra pages to hold PTE entries for each range that
	 * is mapped. Incrementing the count for each covers the case where
	 * the addresses cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */

	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}
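
/*
 * Worked example (4-level paging, so the P4D terms drop out): for a 6MB
 * range, "entries" comes to two PUD pages + two PMD pages + two PTE pages
 * = 24KB, and "tables" adds one PUD page + one PMD page = 8KB to map those
 * entries, for 32KB (eight 4KB pagetable pages) in total.
 */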

void __head sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code, use an open coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() ||
	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of the write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

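	/*
	 * Sketch of the two mappings built below (the offset is an example;
	 * decrypted_base is computed later):
	 *
	 *   encrypted (identity):  vaddr == paddr,          C-bit set
	 *   decrypted + WP:        vaddr == paddr + offset, C-bit clear
	 *
	 * The encryption routine then works through the kernel a chunk at
	 * a time, reading via the decrypted mapping and writing back via
	 * the encrypted one using the intermediate copy buffer.
	 */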
	kernel_start = (unsigned long)RIP_REL_REF(_text);
	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * Calculate the number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;
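
	/*
	 * Illustrative arithmetic: with 4KB pages and a 2MB PMD_SIZE, the
	 * executable area is 4KB (stack) + 4KB (encryption routine) + 2MB
	 * (copy buffer) = 2MB + 8KB; the pagetable area computed next is
	 * appended after it.
	 */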

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
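
	/*
	 * Worked example: with 4-level paging (PGDIR_SHIFT == 39) and the
	 * kernel, initrd and workarea all below 512GB, both pgd_index()
	 * results are 0, so decrypted_base becomes 1UL << 39 and the
	 * decrypted alias of physical address P sits at P + 512GB.
	 */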

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

void __head sme_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
	bool snp_en;
	u64 msr;

	snp_en = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);
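
	/*
	 * Example: many AMD processors report 47 in CPUID Fn8000_001F[EBX]
	 * bits 5:0, making me_mask 1UL << 47, i.e. physical address bit 47
	 * is the C-bit that marks a page as encrypted. The exact bit
	 * position is CPU-specific.
	 */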

	/* Check the SEV MSR to see whether SEV or SME is enabled */
	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/*
	 * Any discrepancy between the presence of a CC blob and SNP
	 * enablement aborts the guest.
	 */
	if (snp_en ^ !!(msr & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		if (!(bp->hdr.xloadflags & XLF_MEM_ENCRYPTION))
			return;

		/*
		 * No SME if the Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. When running as
		 * a KVM guest the MSR_AMD64_SYSCFG check would be sufficient,
		 * but there might be other hypervisors which emulate that
		 * MSR as non-zero or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	}

	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(me_mask);
}