#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
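/* ia64 is a 64-bit arch, so jiffies can directly alias the 64-bit jiffies_64 counter. */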
jiffies = jiffies_64;

PHDRS {
	code PT_LOAD;
	percpu PT_LOAD;
	data PT_LOAD;
	note PT_NOTE;
	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
}
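/*
 * The output sections below are assigned to these program headers via the
 * ":code", ":percpu", ":data", ":note" and ":unwind" annotations.
 */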
SECTIONS {
	/*
	 * unwind exit sections must be discarded before
	 * the rest of the sections get included.
	 */
	/DISCARD/ : {
		*(.IA_64.unwind.exit.text)
		*(.IA_64.unwind_info.exit.text)
		*(.comment)
		*(.note)
	}

	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
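	/*
	 * phys_start is the physical address of the entry point (see ENTRY()
	 * above): _start with the virtual load bias LOAD_OFFSET subtracted.
	 */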
	phys_start = _start - LOAD_OFFSET;

	code : {
	} :code
	. = KERNEL_START;

	_text = .;
	_stext = .;

	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		__start_ivt_text = .;
		*(.text..ivt)
		__end_ivt_text = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.gnu.linkonce.t*)
	}

	.text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
		*(.text2)
	}

#ifdef CONFIG_SMP
	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
		*(.text..lock)
	}
#endif
	_etext = .;

	/*
	 * Read-only data
	 */
	NOTES :code :note /* put .notes in text and mark in PT_NOTE */
	code_continues : {
	} : code /* switch back to regular program... */

	EXCEPTION_TABLE(16)

	/* MCA table */
	. = ALIGN(16);
	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
		__start___mca_table = .;
		*(__mca_table)
		__stop___mca_table = .;
	}
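	/*
	 * The .data..patch.* sections (this one and those in the init area
	 * below) record the locations of instructions that the kernel patches
	 * at boot; each list is bracketed by __start/__end symbols.
	 */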
	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
		__start___phys_stack_reg_patchlist = .;
		*(.data..patch.phys_stack_reg)
		__end___phys_stack_reg_patchlist = .;
	}

	/*
	 * Global data
	 */
	_data = .;

	/* Unwind info & table: */
	. = ALIGN(8);
	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
		*(.IA_64.unwind_info*)
	}
	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
		__start_unwind = .;
		*(.IA_64.unwind*)
		__end_unwind = .;
	} :code :unwind
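	/*
	 * The unwind table above is placed in both the code segment and the
	 * PT_IA_64_UNWIND segment (":code :unwind") so it can be located via
	 * the program headers at run time.
	 */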
	code_continues2 : {
	} : code

	RODATA

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	/*
	 * Initialization code and data:
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)

	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
		__start___vtop_patchlist = .;
		*(.data..patch.vtop)
		__end___vtop_patchlist = .;
	}

	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
		__start___rse_patchlist = .;
		*(.data..patch.rse)
		__end___rse_patchlist = .;
	}

	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
		__start___mckinley_e9_bundles = .;
		*(.data..patch.mckinley_e9)
		__end___mckinley_e9_bundles = .;
	}

#if defined(CONFIG_PARAVIRT)
	. = ALIGN(16);
	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
		__start_paravirt_bundles = .;
		*(.paravirt_bundles)
		__stop_paravirt_bundles = .;
	}
	. = ALIGN(16);
	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
		__start_paravirt_insts = .;
		*(.paravirt_insts)
		__stop_paravirt_insts = .;
	}
	. = ALIGN(16);
	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
		__start_paravirt_branches = .;
		*(.paravirt_branches)
		__stop_paravirt_branches = .;
	}
#endif

#if defined(CONFIG_IA64_GENERIC)
	/* Machine Vector */
	. = ALIGN(16);
	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
		machvec_start = .;
		*(.machvec)
		machvec_end = .;
	}
#endif
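	/*
	 * A CONFIG_IA64_GENERIC kernel carries one machine-vector descriptor
	 * per supported platform; machvec_start/machvec_end bound the list
	 * that is searched at boot to pick the right one.
	 */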
#ifdef CONFIG_SMP
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;
	. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		. = ALIGN(PAGE_SIZE);
		__start_gate_section = .;
		*(.data..gate)
		__stop_gate_section = .;
#ifdef CONFIG_XEN
		. = ALIGN(PAGE_SIZE);
		__xen_start_gate_section = .;
		*(.data..gate.xen)
		__xen_stop_gate_section = .;
#endif
	}
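	/*
	 * __start/__stop_gate_section bracket the gate page image (the
	 * user-visible syscall and signal-return entry points) that gets
	 * mapped into user space.
	 */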
	/*
	 * make sure the gate page doesn't expose
	 * kernel data
	 */
	. = ALIGN(PAGE_SIZE);

	/* Per-cpu data: */
	. = ALIGN(PERCPU_PAGE_SIZE);
	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
	__phys_per_cpu_start = __per_cpu_load;
	/*
	 * ensure percpu data fits
	 * into percpu page size
	 */
	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
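	/*
	 * If the per-cpu data outgrows PERCPU_PAGE_SIZE, the assignment above
	 * would move the location counter backwards and the link fails.
	 */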
	data : {
	} :data
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		_sdata = .;
		INIT_TASK_DATA(PAGE_SIZE)
		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
		DATA_DATA
		*(.data1)
		*(.gnu.linkonce.d*)
		CONSTRUCTORS
	}

	. = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got.plt)
		*(.got)
	}
	__gp = ADDR(.got) + 0x200000;
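	/*
	 * gp-relative addressing uses a 22-bit signed displacement (+/-2MB),
	 * so biasing gp by 0x200000 from the start of .got centers the
	 * reachable window over the GOT and the small-data sections below.
	 */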
	/*
	 * We want the small data sections together,
	 * so single-instruction offsets can access
	 * them all, and initialized data all before
	 * uninitialized, so we can shorten the
	 * on-disk segment size.
	 */
	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
		*(.sdata)
		*(.sdata1)
		*(.srdata)
	}
	_edata = .;

	BSS_SECTION(0, 0, 0)
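	/*
	 * BSS_SECTION(sbss_align, bss_align, stop_align) from asm-generic
	 * emits the .sbss/.bss output sections and the __bss_start/__bss_stop
	 * markers; the zero arguments request no extra alignment here.
	 */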
	_end = .;

	code : {
	} :code

	STABS_DEBUG
	DWARF_DEBUG

	/* Default discards */
	DISCARDS
}