// SPDX-License-Identifier: GPL-2.0
/*
 * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/asm.h>
#include <asm/page_64.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

static inline bool check_la57_support(void)
{
        if (!IS_ENABLED(CONFIG_X86_5LEVEL))
                return false;

        /*
         * 5-level paging is detected and enabled at kernel decompression
         * stage. Only check if it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;

        RIP_REL_REF(__pgtable_l5_enabled) = 1;
        RIP_REL_REF(pgdir_shift) = 48;
        RIP_REL_REF(ptrs_per_p4d) = 512;
        RIP_REL_REF(page_offset_base) = __PAGE_OFFSET_BASE_L5;
        RIP_REL_REF(vmalloc_base) = __VMALLOC_BASE_L5;
        RIP_REL_REF(vmemmap_base) = __VMEMMAP_BASE_L5;

        return true;
}
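/*
 * Note: the USE_EARLY_PGTABLE_L5 define at the top of this file makes
 * pgtable_l5_enabled() read __pgtable_l5_enabled directly instead of
 * going through cpu_feature_enabled(), which cannot be used this early.
 */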

static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
{
        unsigned long vaddr, vaddr_end;
        int i;

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Clear the memory encryption mask from the .bss..decrypted section.
         * The bss section will be memset to zero later in the initialization so
         * there is no need to zero it after changing the memory encryption
         * attribute.
         */
        if (sme_get_me_mask()) {
                vaddr = (unsigned long)__start_bss_decrypted;
                vaddr_end = (unsigned long)__end_bss_decrypted;

                for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
                        /*
                         * On SNP, transition the page to shared in the RMP table so that
                         * it is consistent with the page table attribute change.
                         *
                         * __start_bss_decrypted has a virtual address in the high range
                         * mapping (kernel .text). PVALIDATE, by way of
                         * early_snp_set_memory_shared(), requires a valid virtual
                         * address but the kernel is currently running off of the identity
                         * mapping so use __pa() to get a *currently* valid virtual address.
                         */
                        early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);

                        i = pmd_index(vaddr);
                        pmd[i] -= sme_get_me_mask();
                }
        }

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be accessed using RIP_REL_REF().
 */
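/*
 * A minimal sketch of the mechanism, assuming the definition in
 * <asm/asm.h>: RIP_REL_REF() forces the address to be computed
 * RIP-relatively, so it is correct wherever the image currently sits.
 * It is built roughly along these lines:
 *
 *      static __always_inline void *rip_rel_ptr(void *p)
 *      {
 *              asm("leaq %c1(%%rip), %0" : "=r" (p) : "i" (p));
 *              return p;
 *      }
 *      #define RIP_REL_REF(var)  (*(typeof(&(var)))rip_rel_ptr(&(var)))
 */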
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        pmd_t (*early_pgts)[PTRS_PER_PMD] = RIP_REL_REF(early_dynamic_pgts);
        unsigned long pgtable_flags;
        unsigned long load_delta;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        bool la57;
        int i;

        la57 = check_la57_support();

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
        RIP_REL_REF(phys_base) = load_delta;

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_MASK)
                for (;;);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();

        /* Fixup the physical addresses in the page table */

        pgd = &RIP_REL_REF(early_top_pgt)->pgd;
        pgd[pgd_index(__START_KERNEL_map)] += load_delta;

        if (la57) {
                p4d = (p4dval_t *)&RIP_REL_REF(level4_kernel_pgt);
                p4d[MAX_PTRS_PER_P4D - 1] += load_delta;

                pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
        }

        RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
        RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;

        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
                RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;

        /*
         * Set up the identity mapping for the switchover. These
         * entries should *NOT* have the global bit set! This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

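        /*
         * Worked example for the pairs of writes below: two adjacent
         * entries (pgd[i + 0] and pgd[i + 1], and so on) are populated at
         * each level because the identity mapping of the image may straddle
         * a PUD or PGDIR boundary. A kernel image larger than 2 MiB loaded
         * at physaddr 0x3fe00000 crosses into the next 1 GiB region, so
         * pud[0] and pud[1] must both point at the new pmd table.
         */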
        pud = &early_pgts[0]->pmd;
        pmd = &early_pgts[1]->pmd;
        RIP_REL_REF(next_early_pgt) = 2;

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

        if (la57) {
                p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = physaddr >> P4D_SHIFT;
                p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
                p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = physaddr >> PUD_SHIFT;
        pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
        pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        pmd_entry &= RIP_REL_REF(__supported_pte_mask);
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT);

                pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }

        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds; when the kernel is relocated,
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         *
         * Only the region occupied by the kernel image has so far
         * been checked against the table of usable memory regions
         * provided by the firmware, so invalidate pages outside that
         * region. A page table entry that maps to a reserved area of
         * memory would allow processor speculation into that area,
         * and on some hardware (particularly the UV platform) even
         * speculative access to some reserved areas is caught as an
         * error, causing the BIOS to halt the system.
         */

        pmd = &RIP_REL_REF(level2_kernel_pgt)->pmd;

        /* invalidate pages before the kernel image */
        for (i = 0; i < pmd_index((unsigned long)_text); i++)
                pmd[i] &= ~_PAGE_PRESENT;

        /* fixup pages that are part of the kernel image */
        for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;

        /* invalidate pages after the kernel image */
        for (; i < PTRS_PER_PMD; i++)
                pmd[i] &= ~_PAGE_PRESENT;

        return sme_postprocess_startup(bp, pmd);
}
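/*
 * The mask returned above travels back to head_64.S in %rax, where it is
 * folded into the page table address before that value is written to CR3;
 * that is what "modifier for the initial pgdir entry" refers to.
 */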

/*
 * Wipe all early page tables except for the kernel symbol map. The memset
 * below deliberately stops one pgd_t short of PTRS_PER_PGD so that the top
 * PGD entry, which holds the kernel's high mapping, survives the wipe.
 */
static void __init reset_early_page_tables(void)
{
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address or early pgt is done ? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return false;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled())
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return true;
}

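/*
 * Worked example for the PMD arithmetic below: a faulting address of
 * __PAGE_OFFSET + 0x12345678 yields physaddr 0x12345678; masking with
 * PMD_MASK rounds that down to the 2 MiB frame 0x12200000, and
 * early_pmd_flags supplies the present, write and PSE bits, so a single
 * large page covers the whole region around the fault.
 */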
static bool __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

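/*
 * Early exception dispatch: a page fault at this stage means a reference
 * through the early page tables, which early_make_pgtable() can satisfy
 * on demand; #VC (SEV-ES) and #VE (TDX) have dedicated boot handlers.
 * Anything else is left to the exception fixup table.
 */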
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
        if (trapnr == X86_TRAP_PF &&
            early_make_pgtable(native_read_cr2()))
                return;

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
            trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
                return;

        if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
                return;

        early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
        memset(__brk_base, 0,
               (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

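/*
 * The 64-bit command line pointer is split across two boot_params fields.
 * Worked example: hdr.cmd_line_ptr = 0x00020000 with ext_cmd_line_ptr = 0x1
 * combines below to (0x1ULL << 32) | 0x00020000 = 0x100020000.
 */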
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
        char * command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        /*
         * This needs to happen *before* kasan_early_init() because the
         * latter maps stuff into that page.
         */
        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        /*
         * Flush global TLB entries which could be left over from the trampoline page
         * table.
         *
         * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
         * instrument native_write_cr4() so KASAN must be initialized for that
         * instrumentation to work.
         */
        __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

        idt_setup_early_handler();

        /* Needed before cc_platform_has() can be used for TDX */
        tdx_early_init();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* set init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{
        /* version is always non-zero if it was copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

/* This may run while still in the direct mapping */
static void __head startup_64_load_idt(void *vc_handler)
{
        struct desc_ptr desc = {
                .address = (unsigned long)&RIP_REL_REF(bringup_idt_table),
                .size = sizeof(bringup_idt_table) - 1,
        };
        struct idt_data data;
        gate_desc idt_desc;

        /* @vc_handler is set only for a VMM Communication Exception */
        if (vc_handler) {
                init_idt_data(&data, X86_TRAP_VC, vc_handler);
                idt_init_desc(&idt_desc, &data);
                native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
        }

        native_load_idt(&desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
        void *handler = NULL;

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
                setup_ghcb();
                handler = vc_boot_ghcb;
        }

        startup_64_load_idt(handler);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_gdt_idt(void)
{
        struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
        void *handler = NULL;

        struct desc_ptr startup_gdt_descr = {
                .address = (unsigned long)&RIP_REL_REF(*gdt),
                .size = GDT_SIZE - 1,
        };

        /* Load GDT */
        native_load_gdt(&startup_gdt_descr);

        /* New GDT is live - reload data segment registers */
        asm volatile("movl %%eax, %%ds\n"
                     "movl %%eax, %%ss\n"
                     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
                handler = &RIP_REL_REF(vc_no_ghcb);

        startup_64_load_idt(handler);
}