Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v4.17 — ia64 kernel linker script (arch/ia64/kernel/vmlinux.lds.S) as of Linux v4.17:
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#include <asm/cache.h>
  4#include <asm/ptrace.h>
  5#include <asm/pgtable.h>
  6#include <asm/thread_info.h>
  7
  8#include <asm-generic/vmlinux.lds.h>
  9
/*
 * Target object format/architecture and the ELF entry point.
 * phys_start is defined inside SECTIONS below as _start - LOAD_OFFSET,
 * so the recorded entry point is a physical (load) address.
 */
 10OUTPUT_FORMAT("elf64-ia64-little")
 11OUTPUT_ARCH(ia64)
 12ENTRY(phys_start)
/* Make the 'jiffies' symbol an alias of the 64-bit jiffies_64 counter. */
 13jiffies = jiffies_64;
 14
/*
 * ELF program headers (segments).  Output sections below attach to
 * these with the ":code", ":percpu", ":data", ":note" and ":unwind"
 * phdr annotations.
 */
 15PHDRS {
 16	code   PT_LOAD;
 17	percpu PT_LOAD;
 18	data   PT_LOAD;
 19	note   PT_NOTE;
 20	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
 21}
 22
/*
 * Section layout.  Throughout, AT(ADDR(x) - LOAD_OFFSET) sets each
 * section's load (physical) address LOAD_OFFSET below its virtual
 * address.
 */
 23SECTIONS {
 24	/*
 25	 * unwind exit sections must be discarded before
 26	 * the rest of the sections get included.
 27	 */
 28	/DISCARD/ : {
 29		*(.IA_64.unwind.exit.text)
 30		*(.IA_64.unwind_info.exit.text)
 31		*(.comment)
 32		*(.note)
 33	}
 34
 35	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
 36	phys_start = _start - LOAD_OFFSET;
 37
	/* Empty marker section: attach following output to the "code" segment. */
 38	code : {
 39	} :code
 40	. = KERNEL_START;
 41
 42	_text = .;
 43	_stext = .;
 44
	/* Kernel text; the interrupt vector table (.text..ivt) is placed first. */
 45	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 46		__start_ivt_text = .;
 47		*(.text..ivt)
 48		__end_ivt_text = .;
 49		TEXT_TEXT
 50		SCHED_TEXT
 51		CPUIDLE_TEXT
 52		LOCK_TEXT
 53		KPROBES_TEXT
 54		*(.gnu.linkonce.t*)
 55	}
 56
 57	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
 58		*(.text2)
 59	}
 60
 61#ifdef CONFIG_SMP
 62	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
 63		*(.text..lock)
 64	}
 65#endif
 66	_etext = .;
 67
 68	/*
 69	 * Read-only data
 70	 */
 71	NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
 72	code_continues : {
 73	} : code               /* switch back to regular program...  */
 74
 75	EXCEPTION_TABLE(16)
 76
 77	/* MCA table */
 78	. = ALIGN(16);
 79	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
 80		__start___mca_table = .;
 81		*(__mca_table)
 82		__stop___mca_table = .;
 83	}
 84
	/* Runtime-patch list: bounded by __start/__end symbols for iteration. */
 85	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
 86		__start___phys_stack_reg_patchlist = .;
 87		*(.data..patch.phys_stack_reg)
 88		__end___phys_stack_reg_patchlist = .;
 89	}
 90
 91	/*
 92	 * Global data
 93	 */
 94	_data = .;
 95
 96	/* Unwind info & table: */
 97	. = ALIGN(8);
 98	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
 99		*(.IA_64.unwind_info*)
100	}
101	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
102		__start_unwind = .;
103		*(.IA_64.unwind*)
104		__end_unwind = .;
105	} :unwind
106	code_continues2 : {
107	} : code
108
109	RODATA
110
	/* .opd: ia64 procedure descriptors (targets of function pointers). */
111	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
112		__start_opd = .;
113		*(.opd)
114		__end_opd = .;
115	}
116
117	/*
118	 * Initialization code and data:
119	 */
120	. = ALIGN(PAGE_SIZE);
121	__init_begin = .;
122
123	INIT_TEXT_SECTION(PAGE_SIZE)
124	INIT_DATA_SECTION(16)
125
126	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
127		__start___vtop_patchlist = .;
128		*(.data..patch.vtop)
129		__end___vtop_patchlist = .;
130	}
131
132	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
133		__start___rse_patchlist = .;
134		*(.data..patch.rse)
135		__end___rse_patchlist = .;
136	}
137
138	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
139		__start___mckinley_e9_bundles = .;
140		*(.data..patch.mckinley_e9)
141		__end___mckinley_e9_bundles = .;
142	}
143

 



















144#if defined(CONFIG_IA64_GENERIC)
145	/* Machine Vector */
146	. = ALIGN(16);
147	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
148		machvec_start = .;
149		*(.machvec)
150		machvec_end = .;
151	}
152#endif
153
154#ifdef	CONFIG_SMP
155	. = ALIGN(PERCPU_PAGE_SIZE);
156	__cpu0_per_cpu = .;
157	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
158#endif
159
160	. = ALIGN(PAGE_SIZE);
161	__init_end = .;
162
163	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
164		PAGE_ALIGNED_DATA(PAGE_SIZE)
165		. = ALIGN(PAGE_SIZE);
166		__start_gate_section = .;
167		*(.data..gate)
168		__stop_gate_section = .;






169	}
170	/*
171	 * make sure the gate page doesn't expose
172	 * kernel data
173	 */
174	. = ALIGN(PAGE_SIZE);
175
176	/* Per-cpu data: */
177	. = ALIGN(PERCPU_PAGE_SIZE);
178	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
179	__phys_per_cpu_start = __per_cpu_load;
180	/*
181	 * ensure percpu data fits
182	 * into percpu page size
183	 */
184	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
185
	/* Empty marker section: switch to the "data" segment for writable data. */
186	data : {
187	} :data
188	.data : AT(ADDR(.data) - LOAD_OFFSET) {
189		_sdata  =  .;
190		INIT_TASK_DATA(PAGE_SIZE)
191		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
192		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
193		DATA_DATA
194		*(.data1)
195		*(.gnu.linkonce.d*)
196		CONSTRUCTORS
197	}
198
199	BUG_TABLE
200
201	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
202	.got : AT(ADDR(.got) - LOAD_OFFSET) {
203		*(.got.plt)
204		*(.got)
205	}
	/* __gp: global pointer, biased 0x200000 (2 MB) from the GOT base. */
206	__gp = ADDR(.got) + 0x200000;
207
208	/*
209	 * We want the small data sections together,
210	 * so single-instruction offsets can access
211	 * them all, and initialized data all before
212	 * uninitialized, so we can shorten the
213	 * on-disk segment size.
214	 */
215	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
216		*(.sdata)
217		*(.sdata1)
218		*(.srdata)
219	}
220	_edata  =  .;
221
222	BSS_SECTION(0, 0, 0)
223
224	_end = .;
225
226	code : {
227	} :code
228
229	STABS_DEBUG
230	DWARF_DEBUG
231
232	/* Default discards */
233	DISCARDS
234}
v3.5.6 — the same ia64 linker script (arch/ia64/kernel/vmlinux.lds.S) as of the older Linux v3.5.6, shown below for comparison:
 
  1
  2#include <asm/cache.h>
  3#include <asm/ptrace.h>
  4#include <asm/pgtable.h>
 
  5
  6#include <asm-generic/vmlinux.lds.h>
  7
/*
 * Target object format/architecture and the ELF entry point.
 * phys_start is defined inside SECTIONS below as _start - LOAD_OFFSET,
 * so the recorded entry point is a physical (load) address.
 */
  8OUTPUT_FORMAT("elf64-ia64-little")
  9OUTPUT_ARCH(ia64)
 10ENTRY(phys_start)
/* Make the 'jiffies' symbol an alias of the 64-bit jiffies_64 counter. */
 11jiffies = jiffies_64;
 12
/*
 * ELF program headers (segments).  Output sections below attach to
 * these with the ":code", ":percpu", ":data", ":note" and ":unwind"
 * phdr annotations.
 */
 13PHDRS {
 14	code   PT_LOAD;
 15	percpu PT_LOAD;
 16	data   PT_LOAD;
 17	note   PT_NOTE;
 18	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
 19}
 20
/*
 * Section layout (older v3.5.6 version of this script; notably it still
 * carries CONFIG_PARAVIRT and CONFIG_XEN sections).  Throughout,
 * AT(ADDR(x) - LOAD_OFFSET) sets each section's load (physical)
 * address LOAD_OFFSET below its virtual address.
 */
 21SECTIONS {
 22	/*
 23	 * unwind exit sections must be discarded before
 24	 * the rest of the sections get included.
 25	 */
 26	/DISCARD/ : {
 27		*(.IA_64.unwind.exit.text)
 28		*(.IA_64.unwind_info.exit.text)
 29		*(.comment)
 30		*(.note)
 31	}
 32
 33	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
 34	phys_start = _start - LOAD_OFFSET;
 35
	/* Empty marker section: attach following output to the "code" segment. */
 36	code : {
 37	} :code
 38	. = KERNEL_START;
 39
 40	_text = .;
 41	_stext = .;
 42
	/* Kernel text; the interrupt vector table (.text..ivt) is placed first. */
 43	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 44		__start_ivt_text = .;
 45		*(.text..ivt)
 46		__end_ivt_text = .;
 47		TEXT_TEXT
 48		SCHED_TEXT

 49		LOCK_TEXT
 50		KPROBES_TEXT
 51		*(.gnu.linkonce.t*)
 52	}
 53
 54	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
 55		*(.text2)
 56	}
 57
 58#ifdef CONFIG_SMP
 59	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
 60		*(.text..lock)
 61	}
 62#endif
 63	_etext = .;
 64
 65	/*
 66	 * Read-only data
 67	 */
 68	NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
 69	code_continues : {
 70	} : code               /* switch back to regular program...  */
 71
 72	EXCEPTION_TABLE(16)
 73
 74	/* MCA table */
 75	. = ALIGN(16);
 76	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
 77		__start___mca_table = .;
 78		*(__mca_table)
 79		__stop___mca_table = .;
 80	}
 81
	/* Runtime-patch list: bounded by __start/__end symbols for iteration. */
 82	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
 83		__start___phys_stack_reg_patchlist = .;
 84		*(.data..patch.phys_stack_reg)
 85		__end___phys_stack_reg_patchlist = .;
 86	}
 87
 88	/*
 89	 * Global data
 90	 */
 91	_data = .;
 92
 93	/* Unwind info & table: */
 94	. = ALIGN(8);
 95	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
 96		*(.IA_64.unwind_info*)
 97	}
 98	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
 99		__start_unwind = .;
100		*(.IA_64.unwind*)
101		__end_unwind = .;
102	} :unwind
103	code_continues2 : {
104	} : code
105
106	RODATA
107
	/* .opd: ia64 procedure descriptors (targets of function pointers). */
108	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {

109		*(.opd)

110	}
111
112	/*
113	 * Initialization code and data:
114	 */
115	. = ALIGN(PAGE_SIZE);
116	__init_begin = .;
117
118	INIT_TEXT_SECTION(PAGE_SIZE)
119	INIT_DATA_SECTION(16)
120
121	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
122		__start___vtop_patchlist = .;
123		*(.data..patch.vtop)
124		__end___vtop_patchlist = .;
125	}
126
127	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
128		__start___rse_patchlist = .;
129		*(.data..patch.rse)
130		__end___rse_patchlist = .;
131	}
132
133	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
134		__start___mckinley_e9_bundles = .;
135		*(.data..patch.mckinley_e9)
136		__end___mckinley_e9_bundles = .;
137	}
138
	/* Paravirt patch tables (removed in later kernels). */
139#if defined(CONFIG_PARAVIRT)
140	. = ALIGN(16);
141	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
142		__start_paravirt_bundles = .;
143		*(.paravirt_bundles)
144		__stop_paravirt_bundles = .;
145	}
146	. = ALIGN(16);
147	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
148		__start_paravirt_insts = .;
149		*(.paravirt_insts)
150		__stop_paravirt_insts = .;
151	}
152	. = ALIGN(16);
153	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
154		__start_paravirt_branches = .;
155		*(.paravirt_branches)
156		__stop_paravirt_branches = .;
157	}
158#endif
159
160#if defined(CONFIG_IA64_GENERIC)
161	/* Machine Vector */
162	. = ALIGN(16);
163	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
164		machvec_start = .;
165		*(.machvec)
166		machvec_end = .;
167	}
168#endif
169
170#ifdef	CONFIG_SMP
171	. = ALIGN(PERCPU_PAGE_SIZE);
172	__cpu0_per_cpu = .;
173	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
174#endif
175
176	. = ALIGN(PAGE_SIZE);
177	__init_end = .;
178
179	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
180		PAGE_ALIGNED_DATA(PAGE_SIZE)
181		. = ALIGN(PAGE_SIZE);
182		__start_gate_section = .;
183		*(.data..gate)
184		__stop_gate_section = .;
185#ifdef CONFIG_XEN
186		. = ALIGN(PAGE_SIZE);
187		__xen_start_gate_section = .;
188		*(.data..gate.xen)
189		__xen_stop_gate_section = .;
190#endif
191	}
192	/*
193	 * make sure the gate page doesn't expose
194	 * kernel data
195	 */
196	. = ALIGN(PAGE_SIZE);
197
198	/* Per-cpu data: */
199	. = ALIGN(PERCPU_PAGE_SIZE);
200	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
201	__phys_per_cpu_start = __per_cpu_load;
202	/*
203	 * ensure percpu data fits
204	 * into percpu page size
205	 */
206	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
207
	/* Empty marker section: switch to the "data" segment for writable data. */
208	data : {
209	} :data
210	.data : AT(ADDR(.data) - LOAD_OFFSET) {
211		_sdata  =  .;
212		INIT_TASK_DATA(PAGE_SIZE)
213		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
214		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
215		DATA_DATA
216		*(.data1)
217		*(.gnu.linkonce.d*)
218		CONSTRUCTORS
219	}


220
221	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
222	.got : AT(ADDR(.got) - LOAD_OFFSET) {
223		*(.got.plt)
224		*(.got)
225	}
	/* __gp: global pointer, biased 0x200000 (2 MB) from the GOT base. */
226	__gp = ADDR(.got) + 0x200000;
227
228	/*
229	 * We want the small data sections together,
230	 * so single-instruction offsets can access
231	 * them all, and initialized data all before
232	 * uninitialized, so we can shorten the
233	 * on-disk segment size.
234	 */
235	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
236		*(.sdata)
237		*(.sdata1)
238		*(.srdata)
239	}
240	_edata  =  .;
241
242	BSS_SECTION(0, 0, 0)
243
244	_end = .;
245
246	code : {
247	} :code
248
249	STABS_DEBUG
250	DWARF_DEBUG
251
252	/* Default discards */
253	DISCARDS
254}