/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols unless you know that the symbol value
 * should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative
 * and put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
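
/*
 * LOAD_OFFSET is the difference between the kernel's link-time virtual
 * addresses and the physical addresses it is loaded at. The
 * AT(ADDR(section) - LOAD_OFFSET) expressions below use it to give
 * every output section a physical load address in the program headers.
 */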

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
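
/*
 * jiffies and jiffies_64 share storage. x86 is little-endian, so the
 * 32-bit 'jiffies' reads the low half of the 64-bit counter at the
 * same address; on 64-bit the two symbols are simply two names for the
 * same variable.
 */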

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages
 * (which are freed), so the identity mappings will be broken into
 * smaller pages. Since, on 64-bit, kernel text and kernel identity
 * mappings are separate, we can enable protection checks as well as
 * retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED					\
	. = ALIGN(PMD_SIZE);				\
	__start_bss_decrypted = .;			\
	*(.bss..decrypted);				\
	. = ALIGN(PAGE_SIZE);				\
	__start_bss_decrypted_unused = .;		\
	. = ALIGN(PMD_SIZE);				\
	__end_bss_decrypted = .;

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

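/*
 * The FLAGS() values below are ELF p_flags bits: PF_X = 1, PF_W = 2,
 * PF_R = 4. Hence FLAGS(5) is R+X, FLAGS(6) is R+W and FLAGS(7) is
 * RWX, matching the R_E/RW_/RWE notes beside each header.
 */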
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ALIGN_ENTRY_TEXT_BEGIN
		ENTRY_TEXT
		IRQENTRY_TEXT
		ALIGN_ENTRY_TEXT_END
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif

		/* End of text section */
		_etext = .;
	} :text = 0x9090
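
	/*
	 * The "= 0x9090" output section fill pads any gaps in the text
	 * segment with 0x90 bytes, the x86 one-byte NOP opcode, so that
	 * padding decodes as NOPs rather than arbitrary instructions.
	 */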

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090
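
	/*
	 * EXCEPTION_TABLE() emits the __ex_table entries between
	 * __start___ex_table and __stop___ex_table; on a fault,
	 * fixup_exception() searches this table to redirect execution to
	 * the recovery code registered for the faulting instruction.
	 */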

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE
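
	/*
	 * ORC unwind tables consumed by the in-kernel ORC stack unwinder;
	 * ORC_UNWIND_TABLE expands to the .orc_unwind_ip/.orc_unwind data
	 * when CONFIG_UNWINDER_ORC=y and to nothing otherwise.
	 */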

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data
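
	/*
	 * The vvar page is mapped read-only into user space together with
	 * the vDSO (see arch/x86/entry/vdso/vma.c), which is how the
	 * vvars placed at fixed offsets above can be read without a
	 * system call.
	 */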

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * start address and size of operations which during runtime
	 * can be patched with virtualization friendly instructions or
	 * baremetal native ones. Think page table operations.
	 * Details in paravirt_types.h
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
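
	/*
	 * At boot, apply_paravirt() in arch/x86/kernel/alternative.c
	 * walks [__parainstructions, __parainstructions_end) and rewrites
	 * each patch site for the hypervisor (or bare metal) that is
	 * actually being run on.
	 */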

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them as binary blobs. The .altinstructions has enough data to
	 * get the address and the length of them to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
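
	/*
	 * The patching itself is done by apply_alternatives() in
	 * arch/x86/kernel/alternative.c: for every struct alt_instr whose
	 * CPU feature bit is set, the original instructions are replaced
	 * in place with the blob stored in .altinstr_replacement.
	 */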

	/*
	 * struct iommu_table_entry entries are injected in this section.
	 * It is an array of IOMMUs which during run time gets sorted depending
	 * on its dependency order. After rootfs_initcall is complete
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
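
	/*
	 * .smp_locks is a table of addresses of LOCK prefixes. The
	 * alternatives code in arch/x86/kernel/alternative.c uses it to
	 * patch the prefixes out while only one CPU is online and to
	 * restore them when additional CPUs come up.
	 */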

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}
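
	/*
	 * __bss_start/__bss_stop bound the region zeroed early in boot
	 * (clear_bss() in arch/x86/kernel/head64.c on 64-bit, startup
	 * code in head_32.S on 32-bit) before the rest of the kernel
	 * relies on static data being zero.
	 */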

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
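
	/*
	 * The brk area acts as a small build-time bump allocator: C code
	 * reserves space with RESERVE_BRK() and carves it out during
	 * early boot with extend_brk(). A sketch (the reservation name
	 * here is made up):
	 *
	 *	RESERVE_BRK(early_pgtables, 6 * PAGE_SIZE);
	 *	void *page = extend_brk(PAGE_SIZE, PAGE_SIZE);
	 *
	 * extend_brk() allocates from [__brk_base, __brk_limit) and
	 * BUG()s if a caller overruns the space reserved here.
	 */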

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since this section is located after
	 * __end_of_kernel_reserve, it will be discarded and become part of
	 * the available memory. As such, it can only be used by very early
	 * boot code and must not be needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
	}
}

#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
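
/*
 * These init_per_cpu__* aliases let early boot code reference the boot
 * CPU's per-cpu variables before the per-cpu areas are set up; e.g.
 * head_64.S loads the initial GDT descriptor and GS base through
 * INIT_PER_CPU_VAR(gdt_page) and INIT_PER_CPU_VAR(fixed_percpu_data)
 * from asm/percpu.h.
 */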

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif