v3.1
 
  1/*
  2 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
  3 *
  4 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  5 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
  6 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
  7 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
  8 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
  9 */
 10
 11
 12#include <linux/linkage.h>
 13#include <linux/threads.h>
 14#include <linux/init.h>
 15#include <asm/segment.h>
 16#include <asm/pgtable.h>
 17#include <asm/page.h>
 18#include <asm/msr.h>
 19#include <asm/cache.h>
 20#include <asm/processor-flags.h>
 21#include <asm/percpu.h>
 22
 23#ifdef CONFIG_PARAVIRT
 24#include <asm/asm-offsets.h>
 25#include <asm/paravirt.h>
 26#else
 27#define GET_CR2_INTO_RCX movq %cr2, %rcx
 28#endif
 29
 30/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 31 * because we need identity-mapped pages.
 32 *
 33 */
 34
 35#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 36
 37L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
 38L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
 39L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 40L3_START_KERNEL = pud_index(__START_KERNEL_map)
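/*
 * (Editor's note, worked values: with __START_KERNEL_map = 0xffffffff80000000,
 * pgd_index() gives (0xffffffff80000000 >> 39) & 511 = 511 and pud_index()
 * gives (0xffffffff80000000 >> 30) & 511 = 510, matching the "= 511" and
 * "= 510" comments next to init_level4_pgt and level3_kernel_pgt below.)
 */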
 41
 42	.text
 43	__HEAD
 44	.code64
 45	.globl startup_64
 46startup_64:
 47
 48	/*
 49	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 50	 * and someone has loaded an identity mapped page table
 51	 * for us.  These identity mapped page tables map all of the
 52	 * kernel pages and possibly all of memory.
 53	 *
 54	 * %esi holds a physical pointer to real_mode_data.
 55	 *
 56	 * We come here either directly from a 64bit bootloader, or from
 57	 * arch/x86_64/boot/compressed/head.S.
 58	 *
 59	 * We only come here initially at boot; nothing else comes here.
 60	 *
 61	 * Since we may be loaded at an address different from what we were
 62	 * compiled to run at we first fixup the physical addresses in our page
 63	 * tables and then reload them.
 64	 */
 65
 66	/* Compute the delta between the address I am compiled to run at and the
 67	 * address I am actually running at.
 68	 */
 69	leaq	_text(%rip), %rbp
 70	subq	$_text - __START_KERNEL_map, %rbp
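	/*
	 * (Editor's note: $_text - __START_KERNEL_map is the physical address
	 * the kernel was linked to run at, so %rbp now holds the load offset.
	 * Illustrative numbers only: a kernel linked for physical 0x1000000
	 * but loaded at 0x1a00000 leaves %rbp = 0xa00000.)
	 */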
 71
 72	/* Is the address not 2M aligned? */
 73	movq	%rbp, %rax
 74	andl	$~PMD_PAGE_MASK, %eax
 75	testl	%eax, %eax
 76	jnz	bad_address
 77
 78	/* Is the address too large? */
 79	leaq	_text(%rip), %rdx
 80	movq	$PGDIR_SIZE, %rax
 81	cmpq	%rax, %rdx
 82	jae	bad_address
 83
 84	/* Fixup the physical addresses in the page table
 85	 */
 86	addq	%rbp, init_level4_pgt + 0(%rip)
 87	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
 88	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
 89
 90	addq	%rbp, level3_ident_pgt + 0(%rip)
 91
 92	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
 93	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)
 94
 95	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
 96
 97	/* Add an identity mapping if I am above 1G */
 98	leaq	_text(%rip), %rdi
 99	andq	$PMD_PAGE_MASK, %rdi
100
101	movq	%rdi, %rax
102	shrq	$PUD_SHIFT, %rax
103	andq	$(PTRS_PER_PUD - 1), %rax
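	/*
	 * (Editor's note: %rax now holds the PUD index of _text.  If it is
	 * zero, _text lies in the first 1G, which level3_ident_pgt[0] already
	 * maps via level2_ident_pgt, so no spare entry is needed.)
	 */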
104	jz	ident_complete
105
106	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
107	leaq	level3_ident_pgt(%rip), %rbx
108	movq	%rdx, 0(%rbx, %rax, 8)
109
110	movq	%rdi, %rax
111	shrq	$PMD_SHIFT, %rax
112	andq	$(PTRS_PER_PMD - 1), %rax
113	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
114	leaq	level2_spare_pgt(%rip), %rbx
115	movq	%rdx, 0(%rbx, %rax, 8)
116ident_complete:
117
118	/*
119	 * Fixup the kernel text+data virtual addresses. Note that
120	 * we might write invalid pmds when the kernel is relocated;
121	 * cleanup_highmap() fixes this up along with the mappings
122	 * beyond _end.
123	 */
124
125	leaq	level2_kernel_pgt(%rip), %rdi
126	leaq	4096(%rdi), %r8
127	/* See if it is a valid page table entry */
1281:	testq	$1, 0(%rdi)
129	jz	2f
130	addq	%rbp, 0(%rdi)
131	/* Go to the next page */
1322:	addq	$8, %rdi
133	cmp	%r8, %rdi
134	jne	1b
135
136	/* Fixup phys_base */
137	addq	%rbp, phys_base(%rip)
138
139	/* Fixup trampoline */
140	addq	%rbp, trampoline_level4_pgt + 0(%rip)
141	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
142
143	/* Due to ENTRY(), sometimes the empty space gets filled with
144	 * zeros. Better to take a jmp than to rely on the empty space
145	 * being filled with 0x90 (nop).
146	 */
147	jmp secondary_startup_64
148ENTRY(secondary_startup_64)
149	/*
150	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
151	 * and someone has loaded a mapped page table.
152	 *
153	 * %esi holds a physical pointer to real_mode_data.
154	 *
155	 * We come here either from startup_64 (using physical addresses)
156	 * or from trampoline.S (using virtual addresses).
157	 *
158	 * Using virtual addresses from trampoline.S removes the need
159	 * to have any identity mapped pages in the kernel page table
160	 * after the boot processor executes this code.
161	 */
162
163	/* Enable PAE mode and PGE */
164	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
165	movq	%rax, %cr4
166
167	/* Setup early boot stage 4-level pagetables. */
168	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
169	addq	phys_base(%rip), %rax
170	movq	%rax, %cr3
171
172	/* Ensure I am executing from virtual addresses */
173	movq	$1f, %rax
174	jmp	*%rax
1751:
176
177	/* Check if nx is implemented */
178	movl	$0x80000001, %eax
179	cpuid
180	movl	%edx,%edi
181
182	/* Setup EFER (Extended Feature Enable Register) */
183	movl	$MSR_EFER, %ecx
184	rdmsr
185	btsl	$_EFER_SCE, %eax	/* Enable System Call */
186	btl	$20,%edi		/* No Execute supported? */
187	jnc     1f
188	btsl	$_EFER_NX, %eax
1891:	wrmsr				/* Make changes effective */
190
191	/* Setup cr0 */
192#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
193			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
194			 X86_CR0_PG)
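	/*
	 * (Editor's note: PE = protected mode, MP = monitor coprocessor,
	 * ET = extension type, NE = numeric error, WP = write protect in
	 * supervisor mode, AM = alignment mask, PG = paging.)
	 */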
195	movl	$CR0_STATE, %eax
196	/* Make changes effective */
197	movq	%rax, %cr0
198
199	/* Setup a boot time stack */
200	movq stack_start(%rip),%rsp
201
202	/* zero EFLAGS after setting rsp */
203	pushq $0
204	popfq
205
206	/*
207	 * We must switch to a new descriptor in kernel space for the GDT
208	 * because soon the kernel won't have access anymore to the userspace
209	 * addresses where we're currently running. We have to do that here
210	 * because in 32bit we couldn't load a 64bit linear address.
211	 */
212	lgdt	early_gdt_descr(%rip)
213
214	/* set up data segments */
215	xorl %eax,%eax
216	movl %eax,%ds
217	movl %eax,%ss
218	movl %eax,%es
219
220	/*
221	 * We don't really need to load %fs or %gs, but load them anyway
222	 * to kill any stale realmode selectors.  This allows execution
223	 * under VT hardware.
224	 */
225	movl %eax,%fs
226	movl %eax,%gs
227
228	/* Set up %gs.
229	 *
230	 * The base of %gs always points to the bottom of the irqstack
231	 * union.  If the stack protector canary is enabled, it is
232	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
233	 * init data section until the per cpu areas are set up.
234	 */
235	movl	$MSR_GS_BASE,%ecx
236	movl	initial_gs(%rip),%eax
237	movl	initial_gs+4(%rip),%edx
238	wrmsr	
239
240	/* %esi is a pointer to the real mode structure with interesting info;
241	   pass it to C. */
242	movl	%esi, %edi
243	
244	/* Finally, jump to run C code and to be at a real kernel address.
245	 * Since we are running on identity-mapped space we have to jump
246	 * to the full 64bit address; this is only possible as an indirect
247	 * jump.  In addition we need to ensure %cs is set so we make this
248	 * a far return.
249	 */
250	movq	initial_code(%rip),%rax
251	pushq	$0		# fake return address to stop unwinder
252	pushq	$__KERNEL_CS	# set correct cs
253	pushq	%rax		# target address in negative space
254	lretq
255
256	/* SMP bootup changes these two */
257	__REFDATA
258	.align	8
259	ENTRY(initial_code)
260	.quad	x86_64_start_kernel
261	ENTRY(initial_gs)
262	.quad	INIT_PER_CPU_VAR(irq_stack_union)
263
264	ENTRY(stack_start)
265	.quad  init_thread_union+THREAD_SIZE-8
266	.word  0
267	__FINITDATA
268
269bad_address:
270	jmp bad_address
271
272	.section ".init.text","ax"
273#ifdef CONFIG_EARLY_PRINTK
274	.globl early_idt_handlers
275early_idt_handlers:
276	i = 0
277	.rept NUM_EXCEPTION_VECTORS
278	movl $i, %esi
279	jmp early_idt_handler
280	i = i + 1
281	.endr
282#endif
283
284ENTRY(early_idt_handler)
285#ifdef CONFIG_EARLY_PRINTK
286	cmpl $2,early_recursion_flag(%rip)
287	jz  1f
288	incl early_recursion_flag(%rip)
289	GET_CR2_INTO_RCX
290	movq %rcx,%r9
291	xorl %r8d,%r8d		# zero for error code
292	movl %esi,%ecx		# get vector number
293	# Test %ecx against mask of vectors that push error code.
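	# (Editor's note: 0x27d00 below has bits 8, 10-14 and 17 set --
	#  #DF, #TS, #NP, #SS, #GP, #PF and #AC, the exceptions that push
	#  an error code onto the stack.)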
294	cmpl $31,%ecx
295	ja 0f
296	movl $1,%eax
297	salq %cl,%rax
298	testl $0x27d00,%eax
299	je 0f
300	popq %r8		# get error code
3010:	movq 0(%rsp),%rcx	# get ip
302	movq 8(%rsp),%rdx	# get cs
303	xorl %eax,%eax
304	leaq early_idt_msg(%rip),%rdi
305	call early_printk
306	cmpl $2,early_recursion_flag(%rip)
307	jz  1f
308	call dump_stack
309#ifdef CONFIG_KALLSYMS	
310	leaq early_idt_ripmsg(%rip),%rdi
311	movq 0(%rsp),%rsi	# get rip again
312	call __print_symbol
313#endif
314#endif /* EARLY_PRINTK */
3151:	hlt
316	jmp 1b
317
318#ifdef CONFIG_EARLY_PRINTK
319early_recursion_flag:
320	.long 0
321
322early_idt_msg:
323	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
324early_idt_ripmsg:
325	.asciz "RIP %s\n"
326#endif /* CONFIG_EARLY_PRINTK */
327	.previous
328
329#define NEXT_PAGE(name) \
330	.balign	PAGE_SIZE; \
331ENTRY(name)
332
333/* Automate the creation of 1 to 1 mapping pmd entries */
334#define PMDS(START, PERM, COUNT)			\
335	i = 0 ;						\
336	.rept (COUNT) ;					\
337	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
338	i = i + 1 ;					\
339	.endr
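/*
 * (Editor's note, an illustrative expansion: PMDS(0, perm, 3) emits
 *	.quad 0 + perm
 *	.quad (1 << PMD_SHIFT) + perm	// 0x200000 + perm
 *	.quad (2 << PMD_SHIFT) + perm	// 0x400000 + perm
 * i.e. consecutive 2MB-aligned physical addresses OR'ed with the permission
 * bits; "perm" is just a placeholder name here.)
 */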
340
341	.data
342	/*
343	 * This default setting generates an ident mapping at address 0x100000
344	 * and a mapping for the kernel that precisely maps virtual address
345	 * 0xffffffff80000000 to physical address 0x000000. (always using
346	 * 2Mbyte large pages provided by PAE mode)
347	 */
348NEXT_PAGE(init_level4_pgt)
349	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
350	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
351	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
352	.org	init_level4_pgt + L4_START_KERNEL*8, 0
353	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
354	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
355
356NEXT_PAGE(level3_ident_pgt)
357	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
358	.fill	511,8,0
359
360NEXT_PAGE(level3_kernel_pgt)
361	.fill	L3_START_KERNEL,8,0
362	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
363	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
364	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
365
366NEXT_PAGE(level2_fixmap_pgt)
367	.fill	506,8,0
368	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
369	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
370	.fill	5,8,0
371
372NEXT_PAGE(level1_fixmap_pgt)
373	.fill	512,8,0
374
375NEXT_PAGE(level2_ident_pgt)
376	/* Since I easily can, map the first 1G.
377	 * Don't set NX because code runs from these pages.
378	 */
379	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
380
381NEXT_PAGE(level2_kernel_pgt)
382	/*
383	 * 512 MB kernel mapping. We spend a full page on this pagetable
384	 * anyway.
385	 *
386	 * The kernel code+data+bss must not be bigger than that.
387	 *
388	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
389	 *  If you want to increase this then increase MODULES_VADDR
390	 *  too.)
391	 */
392	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
393		KERNEL_IMAGE_SIZE/PMD_SIZE)
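	/*
	 * (Editor's note: with the default KERNEL_IMAGE_SIZE of 512MB and
	 * 2MB PMDs this emits 256 entries; the rest of the page stays zero.)
	 */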
394
395NEXT_PAGE(level2_spare_pgt)
396	.fill   512, 8, 0
397
398#undef PMDS
399#undef NEXT_PAGE
400
401	.data
402	.align 16
403	.globl early_gdt_descr
404early_gdt_descr:
405	.word	GDT_ENTRIES*8-1
406early_gdt_descr_base:
407	.quad	INIT_PER_CPU_VAR(gdt_page)
408
409ENTRY(phys_base)
410	/* This must match the first entry in level2_kernel_pgt */
411	.quad   0x0000000000000000
412
413#include "../../x86/xen/xen-head.S"
414	
415	.section .bss, "aw", @nobits
416	.align L1_CACHE_BYTES
417ENTRY(idt_table)
418	.skip IDT_ENTRIES * 16
419
420	__PAGE_ALIGNED_BSS
421	.align PAGE_SIZE
422ENTRY(empty_zero_page)
423	.skip PAGE_SIZE
v6.2
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
  4 *
  5 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  6 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
  7 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
  8 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
  9 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 10 */
 11
 12
 13#include <linux/linkage.h>
 14#include <linux/threads.h>
 15#include <linux/init.h>
 16#include <linux/pgtable.h>
 17#include <asm/segment.h>
 18#include <asm/page.h>
 19#include <asm/msr.h>
 20#include <asm/cache.h>
 21#include <asm/processor-flags.h>
 22#include <asm/percpu.h>
 23#include <asm/nops.h>
 24#include "../entry/calling.h"
 25#include <asm/export.h>
 26#include <asm/nospec-branch.h>
 27#include <asm/fixmap.h>
 28
 29/*
 30 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 31 * because we need identity-mapped pages.
 32 */
 33#define l4_index(x)	(((x) >> 39) & 511)
 34#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 35
 36L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
 37L4_START_KERNEL = l4_index(__START_KERNEL_map)
 38
 39L3_START_KERNEL = pud_index(__START_KERNEL_map)
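/*
 * (Editor's note, worked values: l4_index(0xffffffff80000000) = 511 and
 * pud_index(0xffffffff80000000) = 510, matching the "= 511"/"= 510" comments
 * at init_top_pgt and level3_kernel_pgt below; with __PAGE_OFFSET_BASE_L4 =
 * 0xffff888000000000, L4_PAGE_OFFSET works out to 273.)
 */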
 40
 41	.text
 42	__HEAD
 43	.code64
 44SYM_CODE_START_NOALIGN(startup_64)
 45	UNWIND_HINT_EMPTY
 46	/*
 47	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 48	 * and someone has loaded an identity mapped page table
 49	 * for us.  These identity mapped page tables map all of the
 50	 * kernel pages and possibly all of memory.
 51	 *
 52	 * %rsi holds a physical pointer to real_mode_data.
 53	 *
 54	 * We come here either directly from a 64bit bootloader, or from
 55	 * arch/x86/boot/compressed/head_64.S.
 56	 *
 57	 * We only come here initially at boot; nothing else comes here.
 58	 *
 59	 * Since we may be loaded at an address different from what we were
 60	 * compiled to run at we first fixup the physical addresses in our page
 61	 * tables and then reload them.
 62	 */
 63
 64	/* Set up the stack for verify_cpu(), similar to initial_stack below */
 65	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp
 66
 67	leaq	_text(%rip), %rdi
 68
 69	/*
 70	 * initial_gs points to initial fixed_percpu_data struct with storage for
 71	 * the stack protector canary. Global pointer fixups are needed at this
 72	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
 73	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
 74	 */
 75	movl	$MSR_GS_BASE, %ecx
 76	movq	initial_gs(%rip), %rax
 77	movq	$_text, %rdx
 78	subq	%rdx, %rax
 79	addq	%rdi, %rax
 80	movq	%rax, %rdx
 81	shrq	$32,  %rdx
 82	wrmsr
 83
 84	pushq	%rsi
 85	call	startup_64_setup_env
 86	popq	%rsi
 87
 88#ifdef CONFIG_AMD_MEM_ENCRYPT
 89	/*
 90	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
 91	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
 92	 * which needs to be done before any CPUID instructions are executed in
 93	 * subsequent code.
 94	 */
 95	movq	%rsi, %rdi
 96	pushq	%rsi
 97	call	sme_enable
 98	popq	%rsi
 99#endif
100
101	/* Now switch to __KERNEL_CS so IRET works reliably */
102	pushq	$__KERNEL_CS
103	leaq	.Lon_kernel_cs(%rip), %rax
104	pushq	%rax
105	lretq
106
107.Lon_kernel_cs:
108	UNWIND_HINT_EMPTY
109
110	/* Sanitize CPU configuration */
111	call verify_cpu
112
113	/*
114	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
115	 * the kernel and retrieve the modifier (SME encryption mask if SME
116	 * is active) to be added to the initial pgdir entry that will be
117	 * programmed into CR3.
118	 */
119	leaq	_text(%rip), %rdi
120	pushq	%rsi
121	call	__startup_64
122	popq	%rsi
123
124	/* Form the CR3 value being sure to include the CR3 modifier */
125	addq	$(early_top_pgt - __START_KERNEL_map), %rax
126	jmp 1f
127SYM_CODE_END(startup_64)
128
129SYM_CODE_START(secondary_startup_64)
130	UNWIND_HINT_EMPTY
131	ANNOTATE_NOENDBR
132	/*
133	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
134	 * and someone has loaded a mapped page table.
135	 *
136	 * %rsi holds a physical pointer to real_mode_data.
137	 *
138	 * We come here either from startup_64 (using physical addresses)
139	 * or from trampoline.S (using virtual addresses).
140	 *
141	 * Using virtual addresses from trampoline.S removes the need
142	 * to have any identity mapped pages in the kernel page table
143	 * after the boot processor executes this code.
144	 */
145
146	/* Sanitize CPU configuration */
147	call verify_cpu
148
149	/*
150	 * The secondary_startup_64_no_verify entry point is only used by
151	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
152	 * #VC exceptions which can not be handled at this stage of secondary
153	 * CPU bringup.
154	 *
155	 * All non SEV-ES systems, especially Intel systems, need to execute
156	 * verify_cpu() above to make sure NX is enabled.
157	 */
158SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
159	UNWIND_HINT_EMPTY
160	ANNOTATE_NOENDBR
161
162	/*
163	 * Retrieve the modifier (SME encryption mask if SME is active) to be
164	 * added to the initial pgdir entry that will be programmed into CR3.
165	 */
166#ifdef CONFIG_AMD_MEM_ENCRYPT
167	movq	sme_me_mask, %rax
168#else
169	xorq	%rax, %rax
170#endif
171
172	/* Form the CR3 value being sure to include the CR3 modifier */
173	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1741:
175
176#ifdef CONFIG_X86_MCE
177	/*
178	 * Preserve CR4.MCE if the kernel will enable #MC support.
179	 * Clearing MCE may fault in some environments (that also force #MC
180	 * support). Any machine check that occurs before #MC support is fully
181	 * configured will crash the system regardless of the CR4.MCE value set
182	 * here.
183	 */
184	movq	%cr4, %rcx
185	andl	$X86_CR4_MCE, %ecx
186#else
187	movl	$0, %ecx
188#endif
189
190	/* Enable PAE mode, PGE and LA57 */
191	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
192#ifdef CONFIG_X86_5LEVEL
193	testl	$1, __pgtable_l5_enabled(%rip)
194	jz	1f
195	orl	$X86_CR4_LA57, %ecx
1961:
197#endif
198	movq	%rcx, %cr4
199
200	/* Setup early boot stage 4-/5-level pagetables. */
201	addq	phys_base(%rip), %rax
202
203	/*
204	 * For SEV guests: Verify that the C-bit is correct. A malicious
205	 * hypervisor could lie about the C-bit position to perform a ROP
206	 * attack on the guest by writing to the unencrypted stack and waiting for
207	 * the next RET instruction.
208	 * %rsi carries pointer to realmode data and is callee-clobbered. Save
209	 * and restore it.
210	 */
211	pushq	%rsi
212	movq	%rax, %rdi
213	call	sev_verify_cbit
214	popq	%rsi
215
216	/*
217	 * Switch to new page-table
218	 *
219	 * For the boot CPU this switches to early_top_pgt which still has the
220	 * identity mappings present. The secondary CPUs will switch to the
221	 * init_top_pgt here, away from the trampoline_pgd and unmap the
222	 * identity mapped ranges.
223	 */
224	movq	%rax, %cr3
225
226	/*
227	 * Do a global TLB flush after the CR3 switch to make sure the TLB
228	 * entries from the identity mapping are flushed.
229	 */
230	movq	%cr4, %rcx
231	movq	%rcx, %rax
232	xorq	$X86_CR4_PGE, %rcx
233	movq	%rcx, %cr4
234	movq	%rax, %cr4
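	/*
	 * (Editor's note: a CR4 write that toggles PGE off and back on is the
	 * architectural way to flush all TLB entries, including global ones,
	 * which a plain CR3 write would leave cached.)
	 */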
235
236	/* Ensure I am executing from virtual addresses */
237	movq	$1f, %rax
238	ANNOTATE_RETPOLINE_SAFE
239	jmp	*%rax
2401:
241	UNWIND_HINT_EMPTY
242	ANNOTATE_NOENDBR // above
243
244	/*
245	 * We must switch to a new descriptor in kernel space for the GDT
246	 * because soon the kernel won't have access anymore to the userspace
247	 * addresses where we're currently running. We have to do that here
248	 * because in 32bit we couldn't load a 64bit linear address.
249	 */
250	lgdt	early_gdt_descr(%rip)
251
252	/* set up data segments */
253	xorl %eax,%eax
254	movl %eax,%ds
255	movl %eax,%ss
256	movl %eax,%es
257
258	/*
259	 * We don't really need to load %fs or %gs, but load them anyway
260	 * to kill any stale realmode selectors.  This allows execution
261	 * under VT hardware.
262	 */
263	movl %eax,%fs
264	movl %eax,%gs
265
266	/* Set up %gs.
267	 *
268	 * The base of %gs always points to fixed_percpu_data. If the
269	 * stack protector canary is enabled, it is located at %gs:40.
270	 * Note that, on SMP, the boot cpu uses the init data section until
271	 * the per cpu areas are set up.
272	 */
273	movl	$MSR_GS_BASE,%ecx
274	movl	initial_gs(%rip),%eax
275	movl	initial_gs+4(%rip),%edx
276	wrmsr
277
278	/*
279	 * Setup a boot time stack - Any secondary CPU will have lost its stack
280	 * by now because the cr3-switch above unmaps the real-mode stack
281	 */
282	movq initial_stack(%rip), %rsp
283
284	/* Setup and Load IDT */
285	pushq	%rsi
286	call	early_setup_idt
287	popq	%rsi
288
289	/* Check if nx is implemented */
290	movl	$0x80000001, %eax
291	cpuid
292	movl	%edx,%edi
293
294	/* Setup EFER (Extended Feature Enable Register) */
295	movl	$MSR_EFER, %ecx
296	rdmsr
297	/*
298	 * Preserve current value of EFER for comparison and to skip
299	 * EFER writes if no change was made (for TDX guest)
300	 */
301	movl    %eax, %edx
302	btsl	$_EFER_SCE, %eax	/* Enable System Call */
303	btl	$20,%edi		/* No Execute supported? */
304	jnc     1f
305	btsl	$_EFER_NX, %eax
306	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
307
308	/* Avoid writing EFER if no change was made (for TDX guest) */
3091:	cmpl	%edx, %eax
310	je	1f
311	xor	%edx, %edx
312	wrmsr				/* Make changes effective */
3131:
314	/* Setup cr0 */
315	movl	$CR0_STATE, %eax
316	/* Make changes effective */
317	movq	%rax, %cr0
318
319	/* zero EFLAGS after setting rsp */
320	pushq $0
321	popfq
322
323	/* %rsi is a pointer to the real mode structure with interesting info;
324	   pass it to C. */
325	movq	%rsi, %rdi
326
327.Ljump_to_C_code:
328	/*
329	 * Jump to run C code and to be on a real kernel address.
330	 * Since we are running on identity-mapped space we have to jump
331	 * to the full 64bit address; this is only possible as an indirect
332	 * jump.  In addition we need to ensure %cs is set so we make this
333	 * a far return.
334	 *
335	 * Note: do not change to far jump indirect with 64bit offset.
336	 *
337	 * AMD does not support far jump indirect with 64bit offset.
338	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
339	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
340	 *		with the target specified by a far pointer in memory.
341	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
342	 *		with the target specified by a far pointer in memory.
343	 *
344	 * Intel64 does support 64bit offset.
345	 * Software Developer Manual Vol 2: states:
346	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
347	 *		address given in m16:16
348	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
349	 *		address given in m16:32.
350	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
351	 *		address given in m16:64.
352	 */
353	pushq	$.Lafter_lret	# put return address on stack for unwinder
354	xorl	%ebp, %ebp	# clear frame pointer
355	movq	initial_code(%rip), %rax
356	pushq	$__KERNEL_CS	# set correct cs
357	pushq	%rax		# target address in negative space
358	lretq
359.Lafter_lret:
360	ANNOTATE_NOENDBR
361SYM_CODE_END(secondary_startup_64)
362
363#include "verify_cpu.S"
364#include "sev_verify_cbit.S"
365
366#ifdef CONFIG_HOTPLUG_CPU
367/*
368 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
369 * up already except the stack. We just set up the stack here. Then call
370 * start_secondary() via .Ljump_to_C_code.
371 */
372SYM_CODE_START(start_cpu0)
373	ANNOTATE_NOENDBR
374	UNWIND_HINT_EMPTY
375	movq	initial_stack(%rip), %rsp
376	jmp	.Ljump_to_C_code
377SYM_CODE_END(start_cpu0)
378#endif
379
380#ifdef CONFIG_AMD_MEM_ENCRYPT
381/*
382 * VC Exception handler used during early boot when running on kernel
383 * addresses, but before the switch to the idt_table can be made.
384 * The early_idt_handler_array can't be used here because it calls into a lot
385 * of __init code and this handler is also used during CPU offlining/onlining.
386 * Therefore this handler ends up in the .text section so that it stays around
387 * when .init.text is freed.
388 */
389SYM_CODE_START_NOALIGN(vc_boot_ghcb)
390	UNWIND_HINT_IRET_REGS offset=8
391	ENDBR
392
393	ANNOTATE_UNRET_END
394
395	/* Build pt_regs */
396	PUSH_AND_CLEAR_REGS
397
398	/* Call C handler */
399	movq    %rsp, %rdi
400	movq	ORIG_RAX(%rsp), %rsi
401	movq	initial_vc_handler(%rip), %rax
402	ANNOTATE_RETPOLINE_SAFE
403	call	*%rax
404
405	/* Unwind pt_regs */
406	POP_REGS
407
408	/* Remove Error Code */
409	addq    $8, %rsp
410
411	iretq
412SYM_CODE_END(vc_boot_ghcb)
413#endif
414
415	/* Both SMP bootup and ACPI suspend change these variables */
416	__REFDATA
417	.balign	8
418SYM_DATA(initial_code,	.quad x86_64_start_kernel)
419SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
420#ifdef CONFIG_AMD_MEM_ENCRYPT
421SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
422#endif
423
424/*
425 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
426 * reliably detect the end of the stack.
427 */
428SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
429	__FINITDATA
430
431	__INIT
432SYM_CODE_START(early_idt_handler_array)
433	i = 0
434	.rept NUM_EXCEPTION_VECTORS
435	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
436		UNWIND_HINT_IRET_REGS
437		ENDBR
438		pushq $0	# Dummy error code, to make stack frame uniform
439	.else
440		UNWIND_HINT_IRET_REGS offset=8
441		ENDBR
442	.endif
443	pushq $i		# 72(%rsp) Vector number
444	jmp early_idt_handler_common
445	UNWIND_HINT_IRET_REGS
446	i = i + 1
447	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
448	.endr
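	/*
	 * (Editor's note: the .fill above pads each stub to
	 * EARLY_IDT_HANDLER_SIZE bytes with 0xcc (int3), so stub i can be
	 * addressed as early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.)
	 */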
449SYM_CODE_END(early_idt_handler_array)
450	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
451
452SYM_CODE_START_LOCAL(early_idt_handler_common)
453	UNWIND_HINT_IRET_REGS offset=16
454	ANNOTATE_UNRET_END
455	/*
456	 * The stack is the hardware frame, an error code or zero, and the
457	 * vector number.
458	 */
459	cld
460
461	incl early_recursion_flag(%rip)
462
463	/* The vector number is currently in the pt_regs->di slot. */
464	pushq %rsi				/* pt_regs->si */
465	movq 8(%rsp), %rsi			/* RSI = vector number */
466	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
467	pushq %rdx				/* pt_regs->dx */
468	pushq %rcx				/* pt_regs->cx */
469	pushq %rax				/* pt_regs->ax */
470	pushq %r8				/* pt_regs->r8 */
471	pushq %r9				/* pt_regs->r9 */
472	pushq %r10				/* pt_regs->r10 */
473	pushq %r11				/* pt_regs->r11 */
474	pushq %rbx				/* pt_regs->bx */
475	pushq %rbp				/* pt_regs->bp */
476	pushq %r12				/* pt_regs->r12 */
477	pushq %r13				/* pt_regs->r13 */
478	pushq %r14				/* pt_regs->r14 */
479	pushq %r15				/* pt_regs->r15 */
480	UNWIND_HINT_REGS
481
482	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
483	call do_early_exception
484
485	decl early_recursion_flag(%rip)
486	jmp restore_regs_and_return_to_kernel
487SYM_CODE_END(early_idt_handler_common)
488
489#ifdef CONFIG_AMD_MEM_ENCRYPT
490/*
491 * VC Exception handler used during very early boot. The
492 * early_idt_handler_array can't be used because it returns via the
493 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
494 *
495 * XXX it does, fix this.
496 *
497 * This handler will end up in the .init.text section and not be
498 * available to boot secondary CPUs.
499 */
500SYM_CODE_START_NOALIGN(vc_no_ghcb)
501	UNWIND_HINT_IRET_REGS offset=8
502	ENDBR
503
504	ANNOTATE_UNRET_END
505
506	/* Build pt_regs */
507	PUSH_AND_CLEAR_REGS
508
509	/* Call C handler */
510	movq    %rsp, %rdi
511	movq	ORIG_RAX(%rsp), %rsi
512	call    do_vc_no_ghcb
513
514	/* Unwind pt_regs */
515	POP_REGS
516
517	/* Remove Error Code */
518	addq    $8, %rsp
519
520	/* Pure iret required here - don't use INTERRUPT_RETURN */
521	iretq
522SYM_CODE_END(vc_no_ghcb)
523#endif
524
525#define SYM_DATA_START_PAGE_ALIGNED(name)			\
526	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
527
528#ifdef CONFIG_PAGE_TABLE_ISOLATION
529/*
530 * Each PGD needs to be 8k long and 8k aligned.  We do not
531 * ever go out to userspace with these, so we do not
532 * strictly *need* the second page, but this allows us to
533 * have a single set_pgd() implementation that does not
534 * need to worry about whether it has 4k or 8k to work
535 * with.
536 *
537 * This ensures PGDs are 8k long:
538 */
539#define PTI_USER_PGD_FILL	512
540/* This ensures they are 8k-aligned: */
541#define SYM_DATA_START_PTI_ALIGNED(name) \
542	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
543#else
544#define SYM_DATA_START_PTI_ALIGNED(name) \
545	SYM_DATA_START_PAGE_ALIGNED(name)
546#define PTI_USER_PGD_FILL	0
547#endif
548
549/* Automate the creation of 1 to 1 mapping pmd entries */
550#define PMDS(START, PERM, COUNT)			\
551	i = 0 ;						\
552	.rept (COUNT) ;					\
553	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
554	i = i + 1 ;					\
555	.endr
556
557	__INITDATA
558	.balign 4
559
560SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
561	.fill	512,8,0
562	.fill	PTI_USER_PGD_FILL,8,0
563SYM_DATA_END(early_top_pgt)
564
565SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
566	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
567SYM_DATA_END(early_dynamic_pgts)
568
569SYM_DATA(early_recursion_flag, .long 0)
570
571	.data
572
573#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
574SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
575	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
576	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
577	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
578	.org    init_top_pgt + L4_START_KERNEL*8, 0
579	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
580	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
581	.fill	PTI_USER_PGD_FILL,8,0
582SYM_DATA_END(init_top_pgt)
583
584SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
585	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
586	.fill	511, 8, 0
587SYM_DATA_END(level3_ident_pgt)
588SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
589	/*
590	 * Since I easily can, map the first 1G.
591	 * Don't set NX because code runs from these pages.
592	 *
593	 * Note: This sets _PAGE_GLOBAL regardless of whether
594	 * the CPU supports it or it is enabled.  But,
595	 * the CPU should ignore the bit.
596	 */
597	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
598SYM_DATA_END(level2_ident_pgt)
599#else
600SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
601	.fill	512,8,0
602	.fill	PTI_USER_PGD_FILL,8,0
603SYM_DATA_END(init_top_pgt)
604#endif
605
606#ifdef CONFIG_X86_5LEVEL
607SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
608	.fill	511,8,0
609	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
610SYM_DATA_END(level4_kernel_pgt)
611#endif
612
613SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
614	.fill	L3_START_KERNEL,8,0
615	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
616	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
617	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
618SYM_DATA_END(level3_kernel_pgt)
619
620SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
621	/*
622	 * Kernel high mapping.
623	 *
624	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
625	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
626	 * 512 MiB otherwise.
627	 *
628	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
629	 *
630	 * This table is eventually used by the kernel during normal runtime.
631	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
632	 * or _PAGE_GLOBAL in some cases.
633	 */
634	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
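	/*
	 * (Editor's note: with 2MB PMDs this is 1GiB/2MiB = 512 entries, the
	 * whole page, when RANDOMIZE_BASE is enabled, or 512MiB/2MiB = 256
	 * entries otherwise.)
	 */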
635SYM_DATA_END(level2_kernel_pgt)
636
637SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
638	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
639	pgtno = 0
640	.rept (FIXMAP_PMD_NUM)
641	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
642		+ _PAGE_TABLE_NOENC;
643	pgtno = pgtno + 1
644	.endr
645	/* 6 MB reserved space + a 2MB hole */
646	.fill	4,8,0
647SYM_DATA_END(level2_fixmap_pgt)
648
649SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
650	.rept (FIXMAP_PMD_NUM)
651	.fill	512,8,0
652	.endr
653SYM_DATA_END(level1_fixmap_pgt)
654
655#undef PMDS
656
657	.data
658	.align 16
659
660SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
661SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))
662
663	.align 16
664/* This must match the first entry in level2_kernel_pgt */
665SYM_DATA(phys_base, .quad 0x0)
666EXPORT_SYMBOL(phys_base)
667
668#include "../../x86/xen/xen-head.S"
669
670	__PAGE_ALIGNED_BSS
671SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
672	.skip PAGE_SIZE
673SYM_DATA_END(empty_zero_page)
674EXPORT_SYMBOL(empty_zero_page)
675