/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>

#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;
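/*
 * Note: .proc.info.init collects the per-CPU-type proc_info_list records;
 * the early boot code matches them against the ID read from the processor
 * to select the right CPU support functions.
 */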

#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(32); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
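/*
 * Note: .idmap.text holds code that runs while the MMU is being turned on
 * or off and must therefore live in the identity mapping (VA == PA).  The
 * .hyp.idmap.text part is the HYP-mode (KVM) initialisation code, which is
 * identity-mapped separately when HYP mode is set up.
 */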

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
#else
#define ARM_CPU_DISCARD(x) x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x) x
#endif
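/*
 * Note: with SMP_ON_UP fixups or generic BUG tables enabled, other sections
 * may hold references into __exit code, so the .exit sections cannot simply
 * be discarded at link time; they are kept and freed at runtime together
 * with the rest of the init memory instead.
 */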

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
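/*
 * Note: jiffies aliases the least-significant 32 bits of jiffies_64, which
 * sit at offset 0 on little-endian and offset 4 on big-endian builds.
 */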

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : { /* Real text segment */
		_stext = .; /* Text and read-only data */
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IDMAP_TEXT
#ifdef CONFIG_MMU
		*(.fixup)
#endif
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got) /* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .; /* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
#endif
	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;
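	/*
	 * Note: the vectors and stubs above are linked at dummy VMAs (0 and
	 * 0x1000) and are copied into the vector page at boot (see
	 * early_trap_init()), which is why only their relative offsets matter.
	 */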

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
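	/*
	 * Note: .pv_table lists the instructions patched by the phys-to-virt
	 * runtime patching code (CONFIG_ARM_PATCH_PHYS_VIRT) once the actual
	 * physical memory offset is known.
	 */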
	.init.data : {
#ifndef CONFIG_XIP_KERNEL
		INIT_DATA
#endif
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
#ifndef CONFIG_XIP_KERNEL
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4); /* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
	__init_end = .;
	. = ALIGN(THREAD_SIZE);
	__data_loc = .;
#endif

	.data : AT(__data_loc) {
		_data = .; /* address in memory */
		_sdata = .;

		/*
		 * First, the init task union, aligned to an
		 * 8192-byte (THREAD_SIZE) boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);
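	/*
	 * Note: for XIP kernels, .data is linked at its RAM address but stored
	 * in ROM at __data_loc; __data_loc and _edata_loc bound the image that
	 * the early boot code copies into RAM before the kernel proper runs.
	 */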

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and the TCM contents have
	 * been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM.
	 * The VMA is the TCM address and the LMA is in common RAM;
	 * the contents are copied from RAM to TCM at boot and the
	 * used RAM is freed after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (to be used as an extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add the remainder of ITCM as well; it can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
	.comment 0 : { *(.comment) }
}

/*
 * These must never be empty.
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
 * The HYP init code can't be more than a page long.
 * The above comment applies as well.
 */
ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define HYPERVISOR_TEXT \
	VMLINUX_SYMBOL(__hyp_text_start) = .; \
	*(.hyp.text) \
	VMLINUX_SYMBOL(__hyp_text_end) = .;
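/*
 * Note: .hyp.text collects the code that executes in HYP mode (used by
 * KVM); __hyp_text_start/__hyp_text_end let it be mapped into the HYP
 * address space as one contiguous block.
 */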

#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
#else
#define ARM_CPU_DISCARD(x) x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x) x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
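	/*
	 * Note: ALIGN(1<<SECTION_SHIFT) pads to a section boundary (1MiB, or
	 * 2MiB with LPAE) so that, with CONFIG_DEBUG_RODATA, memory permissions
	 * can be applied at section-mapping granularity.
	 */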

	.text : { /* Real text segment */
		_stext = .; /* Text and read-only data */
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		HYPERVISOR_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got) /* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	_etext = .; /* End of text section */

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
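	/*
	 * Note: .vectors is linked at its runtime address (0xffff0000) and
	 * .stubs one page above it; both are copied into the vector page at
	 * boot.  vector_fiq_offset is used by the FIQ code (fiq.c) to locate
	 * vector_fiq inside the copied vector page when installing a handler.
	 */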

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
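	/*
	 * Note: .alt.smp.init records the SMP-only instructions that are
	 * rewritten to their UP equivalents at boot when an SMP_ON_UP kernel
	 * finds itself running on a uniprocessor system.
	 */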
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .; /* address in memory */
		_sdata = .;

		/*
		 * First, the init task union, aligned to an
		 * 8192-byte (THREAD_SIZE) boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and the TCM contents have
	 * been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM.
	 * The VMA is the TCM address and the LMA is in common RAM;
	 * the contents are copied from RAM to TCM at boot and the
	 * used RAM is freed after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (to be used as an extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add the remainder of ITCM as well; it can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif
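/*
 * Note: __start_rodata_section_aligned is consumed by the section-permission
 * code in arch/arm/mm/init.c as the section-aligned start of the rodata
 * region when applying the CONFIG_DEBUG_RODATA protections.
 */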

/*
 * These must never be empty.
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */