/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
#include <asm/thread_info.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	__top_init_kernel_stack(%rip), %rsp

	/* Set up GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
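	/* WRMSR takes the 64-bit value split across EDX (high) and EAX (low) */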
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	call	startup_64_setup_gdt_idt

	/* Now switch to __KERNEL_CS so IRET works reliably */
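	/* A far return pops both RIP and CS, loading CS with __KERNEL_CS */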
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64
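	/* __startup_64() returns the pgdir modifier (SME mask or 0) in RAX */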

	/* Form the CR3 value being sure to include the CR3 modifier */
	leaq	early_top_pgt(%rip), %rcx
	addq	%rcx, %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit
#endif

	/*
	 * Switch to early_top_pgt which still has the identity mappings
	 * present.
	 */
	movq	%rax, %cr3

	/* Branch to the common startup code at its kernel virtual address */
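	/*
	 * An indirect branch is used because the kernel-virtual target is out
	 * of reach of a RIP-relative displacement from this identity-mapped
	 * code; the absolute 64-bit target is read from the literal below.
	 */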
	ANNOTATE_RETPOLINE_SAFE
	jmp	*0f(%rip)
SYM_CODE_END(startup_64)

	__INITRODATA
0:	.quad	common_startup_64

	.text
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15 which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d

	/* Derive the runtime physical address of init_top_pgt[] */
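	/*
	 * 'init_top_pgt - __START_KERNEL_map' is the link-time physical
	 * address; adding phys_base corrects for the actual load address.
	 */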
	movq	phys_base(%rip), %rax
	addq	$(init_top_pgt - __START_KERNEL_map), %rax

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	addq	sme_me_mask(%rip), %rax
#endif
	/*
	 * Switch to the init_top_pgt here, away from the trampoline_pgd and
	 * unmap the identity mapped ranges.
	 */
	movq	%rax, %cr3

SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/*
	 * Create a mask of CR4 bits to preserve. Omit PGE in order to flush
	 * global 1:1 translations from the TLBs.
	 *
	 * From the SDM:
	 * "If CR4.PGE is changing from 0 to 1, there were no global TLB
	 * entries before the execution; if CR4.PGE is changing from 1 to 0,
	 * there will be no global TLB entries after the execution."
	 */
	movl	$(X86_CR4_PAE | X86_CR4_LA57), %edx
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	orl	$X86_CR4_MCE, %edx
#endif
	movq	%cr4, %rcx
	andl	%edx, %ecx

	/* Even if ignored in long mode, set PSE uniformly on all logical CPUs. */
	btsl	$X86_CR4_PSE_BIT, %ecx
	movq	%rcx, %cr4

	/*
	 * Set CR4.PGE to re-enable global translations.
	 */
	btsl	$X86_CR4_PGE_BIT, %ecx
	movq	%rcx, %cr4

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bits 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. The CPU number is provided
	 * in bits 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode then the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't be
	 * handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif

.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
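	/* In xAPIC mode the APIC ID lives in bits 31:24 of the ID register */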
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx
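	/* Walk cpuid_to_apicid[] until an entry matches EAX; ECX is then the CPU number */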

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx	/* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot time stack - any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
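	/*
	 * Build a 10-byte GDT descriptor on the stack: a 16-bit limit at
	 * offset 0 followed by the 64-bit linear base at offset 2.
	 */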
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses the init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Set up and load the IDT */
	call	early_setup_idt

	/* Check if nx is implemented */
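	/* CPUID leaf 0x80000001: EDX bit 20 is the NX (No-Execute) feature bit */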
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
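	/* WRMSR writes EDX:EAX; EFER's defined bits are in the low half, so clear EDX */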
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Set up CR0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/* Pass the boot_params pointer as the first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	xorl	%ebp, %ebp	# clear frame pointer
	ANNOTATE_RETPOLINE_SAFE
	callq	*initial_code(%rip)
	ud2
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code, .quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
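	/* The .fill pads each stub to EARLY_IDT_HANDLER_SIZE bytes, making the stubs a fixed-stride array */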
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	511,8,0
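	/* Slot 511 covers __START_KERNEL_map; 'sym - __START_KERNEL_map' is the link-time physical address */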
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled. But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

	.data
	.align 16

SYM_DATA(smpboot_control, .long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)