v5.4
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol's
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
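
/*
 * LOAD_OFFSET is the difference between a section's link-time (virtual)
 * address and the physical address it is loaded at. Every output section
 * below subtracts it in an AT() clause; schematically (illustration only,
 * the real sections follow):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) { ... }
 *
 * so the program headers carry physical load addresses while symbols
 * still resolve into the high kernel mapping.
 */
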
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
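
/*
 * jiffies and jiffies_64 are two names for the same storage: on 32-bit
 * the alias covers the low word of the little-endian 64-bit counter,
 * so a plain load of jiffies stays atomic, while get_jiffies_64()
 * takes a seqlock when the full value is needed.
 */
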
#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section than for the padding pages
 * (which are freed) around it, so the identity mappings end up broken
 * into smaller pages anyway. Since 64-bit kernel text and kernel
 * identity mappings are separate, we can enable protection checks as
 * well as retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END					\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END					\
		. = ALIGN(PAGE_SIZE);				\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
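
/*
 * FLAGS() takes the ELF p_flags bitmask directly: PF_X = 1, PF_W = 2,
 * PF_R = 4, hence FLAGS(5) = R_E, FLAGS(6) = RW_ and FLAGS(7) = RWE,
 * as annotated above.
 */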

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ALIGN_ENTRY_TEXT_BEGIN
		ENTRY_TEXT
		IRQENTRY_TEXT
		ALIGN_ENTRY_TEXT_END
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)

#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		__indirect_thunk_end = .;
#endif

		/* End of text section */
		_etext = .;
	} :text = 0x9090
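
	/*
	 * The "= 0x9090" fill pattern pads alignment gaps in .text with
	 * 0x90, the x86 NOP opcode, rather than zero bytes (which would
	 * decode as add instructions).
	 */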

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090

	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR
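
		/*
		 * Illustrative expansion for a hypothetical entry
		 * EMIT_VVAR(foo, 128) in <asm/vvar.h>:
		 *
		 *	. = __vvar_beginning_hack + 128;
		 *	*(.vvar_foo)
		 *
		 * i.e. each vvar is pinned at a fixed offset in the page.
		 */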

		/*
		 * Pad the rest of the page with zeros.  Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
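
	/*
	 * With zero-based percpu symbols, a symbol's link-time value is its
	 * offset within each CPU's copy; the boot CPU's copy is loaded at
	 * __per_cpu_load, which is what the INIT_PER_CPU() fixups at the
	 * end of this script add back in.
	 */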

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which at runtime can be
	 * patched with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations. Details in
	 * paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
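
	/*
	 * Each entry here is a struct paravirt_patch_site emitted by the
	 * paravirt call macros in paravirt_types.h; apply_paravirt() walks
	 * the table once during boot and patches the call sites in place.
	 */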

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to locate them and their lengths so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
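
	/*
	 * Both tables are consumed once at boot by
	 * apply_alternatives(__alt_instructions, __alt_instructions_end);
	 * each struct alt_instr records the original site, its replacement
	 * blob, both lengths and the CPU feature bit that selects it.
	 */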

	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which gets sorted at run time by
	 * dependency order. After rootfs_initcall is complete this section
	 * can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
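
	/*
	 * Each entry is a 32-bit relative pointer to a LOCK prefix in the
	 * text. On a uniprocessor boot the alternatives code rewrites those
	 * prefixes to a harmless one-byte prefix (and back, should more
	 * CPUs appear), after which this table may be freed.
	 */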

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
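
	/*
	 * Usage sketch: RESERVE_BRK() from <asm/setup.h> grows
	 * .brk_reservation at link time, e.g.
	 *
	 *	RESERVE_BRK(dmi_alloc, 65536);
	 *
	 * and extend_brk() hands the reserved space out during early boot,
	 * before the page allocator is up.
	 */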

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
	}
}

#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
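
/*
 * For example, init_per_cpu__gdt_page resolves to the boot CPU's copy of
 * gdt_page inside the initial per-cpu load area; early boot code in
 * head_64.S references these symbols before the per-cpu segment base is
 * programmed.
 */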

/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
           "kexec control code size is too big");
#endif

v6.8
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol's
 * value should remain constant even if the kernel image is relocated at
 * run time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative and
 * put it inside the section definition.
 */

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section than for the padding pages
 * (which are freed) around it, so the identity mappings end up broken
 * into smaller pages anyway. Since 64-bit kernel text and kernel
 * identity mappings are separate, we can enable protection checks as
 * well as retain 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END					\
		. = ALIGN(HPAGE_SIZE);				\
		__end_rodata_hpage_align = .;			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END					\
		. = ALIGN(PAGE_SIZE);				\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT

#ifdef CONFIG_CPU_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

	} :text = 0xcccccccc
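
	/*
	 * The fill byte here is 0xcc, the INT3 opcode (v5.4 above used
	 * 0x90, NOP), so control flow straying into .text padding traps
	 * instead of sliding through it.
	 */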

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros.  Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif
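
	/*
	 * These site tables are walked once by apply_retpolines() and
	 * apply_returns() in alternative.c, with .call_sites feeding the
	 * call depth tracking code; all three may be freed after patching.
	 */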

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to locate them and their lengths so the kernel can be
	 * patched safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")
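
	/*
	 * 0x18 and 0xc correspond to the three reserved GOT/PLT slots
	 * (8 or 4 bytes each) that a dynamic linker would use for lazy
	 * dispatch; anything more means real PLT-based calls crept in.
	 */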

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_CPU_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR of the two
 * function addresses: (A | B) has every bit that is set in either
 * address, (A & B) keeps only the shared bits, and subtracting the
 * latter leaves exactly the differing bits, i.e. A ^ B.
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
		"SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */