v3.1
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS
 * SPACE because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

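/*
 * A minimal C sketch of what the index helpers above compute, assuming
 * the classic 4-level layout (PGDIR_SHIFT == 39, PUD_SHIFT == 30, 512
 * entries per table):
 *
 *	static inline unsigned long pgd_index(unsigned long va)
 *	{
 *		return (va >> 39) & 511;	// which 512GB slot
 *	}
 *
 *	static inline unsigned long pud_index(unsigned long va)
 *	{
 *		return (va >> 30) & 511;	// which 1GB slot within it
 *	}
 *
 * With __START_KERNEL_map == 0xffffffff80000000 this gives
 * L4_START_KERNEL == 511 and L3_START_KERNEL == 510, which is why the
 * fixup code below patches the 511*8 and 510*8 slots by hand.
 */
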
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

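/*
 * A sketch of the delta computation above (illustrative, assuming the
 * kernel was linked to run at __START_KERNEL_map plus some physical
 * offset but got loaded elsewhere):
 *
 *	delta = actual_phys(_text) - (_text - __START_KERNEL_map);
 *
 * e.g. a kernel linked for physical 0x1000000 but loaded at 0x5000000
 * leaves delta == 0x4000000 in %rbp; it is added below to every
 * physical address stored in the boot page tables.
 */
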
	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

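/*
 * Rough C equivalent of the above-1G identity fixup (a sketch, not the
 * kernel's own code):
 *
 *	pud = (phys(_text) >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
 *	if (pud) {	// _text is above the premapped first 1G
 *		level3_ident_pgt[pud] = phys(level2_spare_pgt) | _KERNPG_TABLE;
 *		pmd = (phys(_text) >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 *		level2_spare_pgt[pmd] = (phys(_text) & PMD_PAGE_MASK)
 *					| __PAGE_KERNEL_IDENT_LARGE_EXEC;
 *	}
 */
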
	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
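/*
 * The fixup loop above as a C sketch (illustrative, assuming 512
 * eight-byte entries per page table, i.e. 4096 bytes):
 *
 *	for (pmd = level2_kernel_pgt; pmd < level2_kernel_pgt + 512; pmd++)
 *		if (*pmd & 1)		// _PAGE_PRESENT
 *			*pmd += delta;	// rebase the physical address
 */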

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	/* Fixup trampoline */
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space being
	 * filled with 0x90 (nop)
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
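/*
 * Rough C sketch of the EFER setup above (illustrative, assuming
 * pseudo-helpers rdmsr()/wrmsr()/cpuid_edx()).  CPUID leaf 0x80000001
 * returns feature bits in %edx, where bit 20 advertises NX:
 *
 *	u64 efer = rdmsr(MSR_EFER);
 *	efer |= 1ULL << _EFER_SCE;		// enable syscall/sysret
 *	if (cpuid_edx(0x80000001) & (1 << 20))
 *		efer |= 1ULL << _EFER_NX;	// allow no-execute mappings
 *	wrmsr(MSR_EFER, efer);
 */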

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %esi is a pointer to the real mode structure with interesting
	   info. Pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on the real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
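/*
 * Stack picture at the lretq above (illustrative):
 *
 *	  (%rsp) -> initial_code		// target %rip
 *	 8(%rsp) -> __KERNEL_CS			// target %cs
 *	16(%rsp) -> 0				// fake return address
 *
 * lretq pops %rip and %cs together, giving a 64bit absolute jump with
 * a freshly loaded %cs, something a plain indirect jmp cannot do in
 * one instruction.
 */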

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	ENTRY(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
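/*
 * About the 0x27d00 mask above (an illustrative note): bit n is set
 * when vector n pushes an error code, i.e. vectors 8 (#DF), 10 (#TS),
 * 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC):
 *
 *	(1<<8)|(1<<10)|(1<<11)|(1<<12)|(1<<13)|(1<<14)|(1<<17) == 0x27d00
 */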
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 0(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
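/*
 * Example expansion (illustrative): PMDS(0, PERM, 3) emits
 *
 *	.quad (0) + (0 << PMD_SHIFT) + (PERM)
 *	.quad (0) + (1 << PMD_SHIFT) + (PERM)
 *	.quad (0) + (2 << PMD_SHIFT) + (PERM)
 *
 * i.e. three consecutive pmd entries mapping 2MB pages starting at
 * physical address 0.
 */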

	.data
	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE
v4.6
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS
 * SPACE because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
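/*
 * The switchover mapping loop above as a C sketch (illustrative;
 * _text is known to be 2MB aligned from the earlier check):
 *
 *	pmd   = phys(_text) >> PMD_SHIFT;	// first 2MB slot of the image
 *	entry = phys(_text) | (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL);
 *	count = (phys(_end - 1) >> PMD_SHIFT) - pmd + 1;
 *	while (count--) {
 *		pmd_page[pmd & (PTRS_PER_PMD - 1)] = entry;
 *		pmd++;
 *		entry += PMD_SIZE;		// next 2MB page
 *	}
 */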

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting
	   info. Pass it to C. */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on the real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
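/*
 * Illustrative shape of one generated stub: for a vector whose bit is
 * clear in EXCEPTION_ERRCODE_MASK the .rept above emits
 *
 *	pushq $0			# dummy error code
 *	pushq $vector
 *	jmp early_idt_handler_common
 *	<pad with 0xcc to EARLY_IDT_HANDLER_SIZE>
 *
 * so every stub has the same size and early_idt_handler_common always
 * finds the vector number, the error code and the hardware frame at
 * fixed offsets.
 */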

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 88(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
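/*
 * Rough sketch of how these pages get consumed (illustrative; the real
 * logic lives in arch/x86/kernel/head64.c): the early page-fault path
 * above calls early_make_pgtable(), which carves page tables out of
 * this pool one page at a time to map whatever gets touched before the
 * normal allocator exists, roughly:
 *
 *	pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
 *	pud_p[pud_index(address)] = phys(pmd_p) + _KERNPG_TABLE;
 */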

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE