/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>
#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
 * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
 * we retain large page mappings for boundaries spanning kernel text, rodata
 * and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages (which
 * are freed) around it, so the kernel identity mappings will be broken into
 * smaller pages. On 64-bit, kernel text and kernel identity mappings are
 * different, so we can enable the protection checks that come with
 * CONFIG_DEBUG_RODATA while still retaining 2MB large page mappings for
 * kernel text.
 */
#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_DEBUG_RODATA_END	\
	. = ALIGN(HPAGE_SIZE);		\
	__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_DEBUG_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
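
/*
 * The FLAGS() values above are the standard ELF p_flags bits (PF_X = 1,
 * PF_W = 2, PF_R = 4), so FLAGS(5) = PF_R|PF_X (R_E), FLAGS(6) =
 * PF_R|PF_W (RW_) and FLAGS(7) = PF_R|PF_W|PF_X (RWE).
 */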

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
#ifdef CONFIG_X86_32
		. = ALIGN(PAGE_SIZE);
		*(.text..page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090
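
	/*
	 * The "= 0x9090" above sets the fill pattern for this output
	 * section: gaps the linker leaves in .text (e.g. for alignment)
	 * are filled with 0x90 bytes, the x86 NOP opcode, so stray
	 * execution of padding is harmless.
	 */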

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

#if defined(CONFIG_DEBUG_RODATA)
	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
#endif
	X64_ALIGN_DEBUG_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_DEBUG_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

#ifdef CONFIG_X86_64

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)			\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

	} :data
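
	/*
	 * Illustration (not part of the build): this era's asm/vvar.h
	 * declares jiffies at offset 0, so EMIT_VVAR(jiffies, 0) above
	 * expands to
	 *
	 *	. = __vvar_beginning_hack + 0;
	 *	*(.vvar_jiffies)
	 *
	 * pinning each vvar's input section at its fixed offset within
	 * the vvar page.
	 */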

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

#endif /* CONFIG_X86_64 */

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	/*
	 * Start address and size of operations which during runtime
	 * can be patched with virtualization-friendly instructions or
	 * baremetal-native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
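
	/*
	 * For reference, a rough sketch of one .altinstructions record as
	 * laid out by this era's <asm/alternative.h> (the header is
	 * authoritative; the layout has changed across kernel versions):
	 *
	 *	struct alt_instr {
	 *		s32 instr_offset;	// original instruction
	 *		s32 repl_offset;	// offset to replacement
	 *		u16 cpuid;		// feature bit to test
	 *		u8  instrlen;		// length of original
	 *		u8  replacementlen;	// length of replacement
	 *	};
	 *
	 * apply_alternatives() walks these records at boot and, when the
	 * CPU has the named feature, copies the replacement bytes over
	 * the original instruction.
	 */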

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which during run time gets sorted
	 * depending on its dependency order. After rootfs_initcall is
	 * complete this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : { *(.eh_frame) }
}


#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
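
/*
 * Illustration (not part of the build): because per-cpu offsets are
 * zero-based, INIT_PER_CPU(gdt_page) expands to
 *
 *	init_per_cpu__gdt_page = gdt_page + __per_cpu_load;
 *
 * i.e. the boot processor's copy of the symbol inside the per-cpu
 * load area.
 */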

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#define LOAD_OFFSET __START_KERNEL_map

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
const_pcpu_hot = pcpu_hot;
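
/*
 * jiffies_64 is the real 64-bit tick counter; the alias above lets
 * 32-bit code read "jiffies" as its low word, which is valid because
 * x86 is little-endian (on 64-bit the two symbols are simply the same
 * width).
 */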

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the padding pages (which
 * are freed) around it, so the kernel identity mappings will be broken into
 * smaller pages. On 64-bit, kernel text and kernel identity mappings are
 * different, so we can enable protection checks as well as retain 2MB large
 * page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END		\
	. = ALIGN(HPAGE_SIZE);		\
	__end_rodata_hpage_align = .;	\
	__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED				\
	. = ALIGN(PMD_SIZE);			\
	__start_bss_decrypted = .;		\
	*(.bss..decrypted);			\
	. = ALIGN(PAGE_SIZE);			\
	__start_bss_decrypted_unused = .;	\
	. = ALIGN(PMD_SIZE);			\
	__end_bss_decrypted = .;
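
/*
 * Variables are placed in .bss..decrypted via the __bss_decrypted
 * attribute from <asm/mem_encrypt.h>; kvmclock, for example, uses it
 * so its pvclock pages stay shared with the hypervisor when memory
 * encryption is active.
 */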
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END		\
	. = ALIGN(PAGE_SIZE);		\
	__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
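
/*
 * The resulting segments can be sanity-checked on a built image with
 * "readelf -l vmlinux", which lists each PT_LOAD/PT_NOTE entry along
 * with the R/W/E flags requested above.
 */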

SECTIONS
{
	. = __START_KERNEL;
#ifdef CONFIG_X86_32
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT

#ifdef CONFIG_MITIGATION_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text = 0xcccccccc
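
	/*
	 * The "= 0xcccccccc" fill pads gaps in .text with 0xcc bytes, the
	 * x86 INT3 opcode, so a stray jump into padding traps immediately
	 * (the older script padded with 0x90 NOPs instead).
	 */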

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

		/* equivalent to task_pt_regs(&init_task) */
		__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run.
	 * All references to such code must be patched out by alternatives,
	 * normally by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
		LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

#ifdef CONFIG_MITIGATION_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	RUNTIME_CONST_VARIABLES
	RUNTIME_CONST(ptr, USER_PTR_MAX)

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
#ifdef CONFIG_PROPELLER_CLANG
	.llvm_bb_addr_map : { *(.llvm_bb_addr_map) }
#endif

	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses:
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
	    (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
	   "SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */

/*
 * The symbols below are referenced using relative relocations in the
 * respective ELF notes. This produces build time constants that the
 * linker will never mark as relocatable. (Using just ABSOLUTE() is not
 * sufficient for that).
 */
#ifdef CONFIG_XEN_PV
xen_elfnote_entry_value =
	ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen);
#endif
#ifdef CONFIG_PVH
xen_elfnote_phys32_entry_value =
	ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
#endif