v4.17 (arch/ia64/kernel/vmlinux.lds.S)
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;

PHDRS {
	code   PT_LOAD;
	percpu PT_LOAD;
	data   PT_LOAD;
	note   PT_NOTE;
	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}

SECTIONS {
	/*
	 * unwind exit sections must be discarded before
	 * the rest of the sections get included.
	 */
	/DISCARD/ : {
		*(.IA_64.unwind.exit.text)
		*(.IA_64.unwind_info.exit.text)
		*(.comment)
		*(.note)
	}

	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
	phys_start = _start - LOAD_OFFSET;

	code : {
	} :code
	. = KERNEL_START;

	_text = .;
	_stext = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		__start_ivt_text = .;
		*(.text..ivt)
		__end_ivt_text = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.linkonce.t*)
	}

	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
		*(.text2)
	}

#ifdef CONFIG_SMP
	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
		*(.text..lock)
	}
#endif
	_etext = .;

	/*
	 * Read-only data
	 */
	NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
	code_continues : {
	} : code               /* switch back to regular program...  */

	EXCEPTION_TABLE(16)

	/* MCA table */
	. = ALIGN(16);
	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
		__start___mca_table = .;
		*(__mca_table)
		__stop___mca_table = .;
	}

	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
		__start___phys_stack_reg_patchlist = .;
		*(.data..patch.phys_stack_reg)
		__end___phys_stack_reg_patchlist = .;
	}

	/*
	 * Global data
	 */
	_data = .;

	/* Unwind info & table: */
	. = ALIGN(8);
	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
		*(.IA_64.unwind_info*)
	}
	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
		__start_unwind = .;
		*(.IA_64.unwind*)
		__end_unwind = .;
	} :code :unwind
	code_continues2 : {
	} : code

	RODATA

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		*(.opd)
		__end_opd = .;
	}

	/*
	 * Initialization code and data:
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)

	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
		__start___vtop_patchlist = .;
		*(.data..patch.vtop)
		__end___vtop_patchlist = .;
	}

	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
		__start___rse_patchlist = .;
		*(.data..patch.rse)
		__end___rse_patchlist = .;
	}

	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
		__start___mckinley_e9_bundles = .;
		*(.data..patch.mckinley_e9)
		__end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_IA64_GENERIC)
	/* Machine Vector */
	. = ALIGN(16);
	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
		machvec_start = .;
		*(.machvec)
		machvec_end = .;
	}
#endif

#ifdef	CONFIG_SMP
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;
	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
#endif

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		. = ALIGN(PAGE_SIZE);
		__start_gate_section = .;
		*(.data..gate)
		__stop_gate_section = .;
	}
	/*
	 * make sure the gate page doesn't expose
	 * kernel data
	 */
	. = ALIGN(PAGE_SIZE);

	/* Per-cpu data: */
	. = ALIGN(PERCPU_PAGE_SIZE);
	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
	__phys_per_cpu_start = __per_cpu_load;
	/*
	 * ensure percpu data fits
	 * into percpu page size
	 */
	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;

	data : {
	} :data
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		_sdata  =  .;
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

	BUG_TABLE

	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got.plt)
		*(.got)
	}
	__gp = ADDR(.got) + 0x200000;

	/*
	 * We want the small data sections together,
	 * so single-instruction offsets can access
	 * them all, and initialized data all before
	 * uninitialized, so we can shorten the
	 * on-disk segment size.
	 */
	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
		*(.sdata)
		*(.sdata1)
		*(.srdata)
	}
	_edata  =  .;

	BSS_SECTION(0, 0, 0)

	_end = .;

	code : {
	} :code

	STABS_DEBUG
	DWARF_DEBUG

	/* Default discards */
	DISCARDS
}
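The many __start_*/__stop_* pairs above (for example __start___mca_table/__stop___mca_table, or machvec_start/machvec_end) exist so that C code can treat everything the linker collected into a custom section as one contiguous array bracketed by two symbols. Below is a minimal, stand-alone user-space sketch of the same pattern; the section name "my_table" and the struct layout are invented for illustration and are not the kernel's types, and the automatic __start_/__stop_ symbols here come from GNU ld's handling of orphan sections, whereas the kernel script defines its own boundary symbols explicitly.

/* build: gcc -o table_demo table_demo.c  (Linux, GNU ld) */
#include <stdio.h>

struct entry {
	const char *name;        /* hypothetical element layout */
};

/* Drop an entry into a named section, like *(__mca_table) collects above. */
#define ADD_ENTRY(n) \
	static const struct entry entry_##n \
	__attribute__((section("my_table"), used)) = { #n }

ADD_ENTRY(first);
ADD_ENTRY(second);

/* Boundary symbols bracketing the section, provided by the linker. */
extern const struct entry __start_my_table[];
extern const struct entry __stop_my_table[];

int main(void)
{
	const struct entry *p;

	/* Walk every entry the linker gathered between the two symbols. */
	for (p = __start_my_table; p < __stop_my_table; p++)
		printf("%s\n", p->name);
	return 0;
}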
v3.1 (arch/ia64/kernel/vmlinux.lds.S)
 

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;

PHDRS {
	code   PT_LOAD;
	percpu PT_LOAD;
	data   PT_LOAD;
	note   PT_NOTE;
	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}

SECTIONS {
	/*
	 * unwind exit sections must be discarded before
	 * the rest of the sections get included.
	 */
	/DISCARD/ : {
		*(.IA_64.unwind.exit.text)
		*(.IA_64.unwind_info.exit.text)
		*(.comment)
		*(.note)
	}

	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
	phys_start = _start - LOAD_OFFSET;

	code : {
	} :code
	. = KERNEL_START;

	_text = .;
	_stext = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		__start_ivt_text = .;
		*(.text..ivt)
		__end_ivt_text = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.linkonce.t*)
	}

	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
		*(.text2)
	}

#ifdef CONFIG_SMP
	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
		*(.text..lock)
	}
#endif
	_etext = .;

	/*
	 * Read-only data
	 */
	NOTES :code :note       /* put .notes in text and mark in PT_NOTE  */
	code_continues : {
	} : code               /* switch back to regular program...  */

	EXCEPTION_TABLE(16)

	/* MCA table */
	. = ALIGN(16);
	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
		__start___mca_table = .;
		*(__mca_table)
		__stop___mca_table = .;
	}

	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
		__start___phys_stack_reg_patchlist = .;
		*(.data..patch.phys_stack_reg)
		__end___phys_stack_reg_patchlist = .;
	}

	/*
	 * Global data
	 */
	_data = .;

	/* Unwind info & table: */
	. = ALIGN(8);
	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
		*(.IA_64.unwind_info*)
	}
	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
		__start_unwind = .;
		*(.IA_64.unwind*)
		__end_unwind = .;
	} :code :unwind
	code_continues2 : {
	} : code

	RODATA

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	/*
	 * Initialization code and data:
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)

	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
		__start___vtop_patchlist = .;
		*(.data..patch.vtop)
		__end___vtop_patchlist = .;
	}

	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
		__start___rse_patchlist = .;
		*(.data..patch.rse)
		__end___rse_patchlist = .;
	}

	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
		__start___mckinley_e9_bundles = .;
		*(.data..patch.mckinley_e9)
		__end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_PARAVIRT)
	. = ALIGN(16);
	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
		__start_paravirt_bundles = .;
		*(.paravirt_bundles)
		__stop_paravirt_bundles = .;
	}
	. = ALIGN(16);
	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
		__start_paravirt_insts = .;
		*(.paravirt_insts)
		__stop_paravirt_insts = .;
	}
	. = ALIGN(16);
	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
		__start_paravirt_branches = .;
		*(.paravirt_branches)
		__stop_paravirt_branches = .;
	}
#endif

#if defined(CONFIG_IA64_GENERIC)
	/* Machine Vector */
	. = ALIGN(16);
	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
		machvec_start = .;
		*(.machvec)
		machvec_end = .;
	}
#endif

#ifdef	CONFIG_SMP
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;
	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
#endif

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		. = ALIGN(PAGE_SIZE);
		__start_gate_section = .;
		*(.data..gate)
		__stop_gate_section = .;
#ifdef CONFIG_XEN
		. = ALIGN(PAGE_SIZE);
		__xen_start_gate_section = .;
		*(.data..gate.xen)
		__xen_stop_gate_section = .;
#endif
	}
	/*
	 * make sure the gate page doesn't expose
	 * kernel data
	 */
	. = ALIGN(PAGE_SIZE);

	/* Per-cpu data: */
	. = ALIGN(PERCPU_PAGE_SIZE);
	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
	__phys_per_cpu_start = __per_cpu_load;
	/*
	 * ensure percpu data fits
	 * into percpu page size
	 */
	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;

	data : {
	} :data
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		_sdata  =  .;
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got.plt)
		*(.got)
	}
	__gp = ADDR(.got) + 0x200000;

	/*
	 * We want the small data sections together,
	 * so single-instruction offsets can access
	 * them all, and initialized data all before
	 * uninitialized, so we can shorten the
	 * on-disk segment size.
	 */
	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
		*(.sdata)
		*(.sdata1)
		*(.srdata)
	}
	_edata  =  .;

	BSS_SECTION(0, 0, 0)

	_end = .;

	code : {
	} :code

	STABS_DEBUG
	DWARF_DEBUG

	/* Default discards */
	DISCARDS
}
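Both versions compute __gp = ADDR(.got) + 0x200000. On IA-64, gp-relative addressing of the small-data area (addl reg = @gprel(sym), gp) takes a 22-bit signed displacement, roughly a +/- 2 MiB window around gp, so parking gp 2 MiB past the start of .got makes that window begin exactly at the GOT and cover the .sdata/.srdata sections placed after it. A small stand-alone sketch of that arithmetic follows; the 0x1000000 base address is arbitrary and chosen only for the printout, not taken from the script.

/* build: gcc -o gp_window gp_window.c */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t got_addr = 0x1000000;           /* hypothetical ADDR(.got)      */
	uint64_t gp       = got_addr + 0x200000; /* __gp as computed above       */
	int64_t  disp_min = -(1LL << 21);        /* 22-bit signed: -2 MiB        */
	int64_t  disp_max =  (1LL << 21) - 1;    /*                +2 MiB - 1    */

	/* The gp-reachable window starts exactly at .got. */
	printf("gp-reachable: [%#llx, %#llx]\n",
	       (unsigned long long)(gp + disp_min),
	       (unsigned long long)(gp + disp_max));
	return 0;
}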