/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>

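/*
 * Each supported CPU contributes a struct proc_info_list record to
 * .proc.info.init (from arch/arm/mm/proc-*.S); the boot code scans
 * __proc_info_begin..__proc_info_end to find the entry matching the
 * running CPU's ID.
 */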
#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;

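/*
 * .idmap.text collects code that runs while the MMU is being turned on
 * or off and must therefore be identity mapped (VA == PA).  The
 * .hyp.idmap.text part is the HYP-mode setup code, which the assertion
 * at the end of this script requires to fit within one page.
 */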
#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(32); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

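/*
 * With CPU hotplug, the CPU tear-down (cpuexit) code must stay in the
 * image; without it, that code can be discarded.  Likewise, exit
 * text/data is normally discarded, but is kept when the SMP_ON_UP fixup
 * table or the generic bug table may still hold references into it.
 */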
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

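/*
 * ARM is 32-bit, so jiffies aliases the low word of jiffies_64:
 * offset 0 on little-endian, offset 4 on big-endian.
 */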
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

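	/*
	 * An XIP kernel executes in place from flash and is linked at the
	 * flash mapping XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); a normal
	 * kernel is linked at PAGE_OFFSET + TEXT_OFFSET in RAM.
	 */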
#ifdef CONFIG_XIP_KERNEL
	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
	. = PAGE_OFFSET + TEXT_OFFSET;
#endif
	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {	/* Real text segment */
		_stext = .;	/* Text and read-only data */
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IDMAP_TEXT
#ifdef CONFIG_MMU
		*(.fixup)
#endif
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)		/* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;	/* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
#endif
	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;
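	/*
	 * At boot, early_trap_init() copies .vectors into the vectors
	 * page (0xffff0000 when high vectors are enabled) and .stubs into
	 * the page above it, so the sections are linked at VMAs 0 and
	 * 0x1000 and only their load addresses matter here.
	 */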

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
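	/*
	 * CONFIG_SMP_ON_UP lets an SMP kernel boot on uniprocessor
	 * hardware: each ALT_SMP/ALT_UP pair records the UP replacement
	 * in .alt.smp.init, and the boot code patches the SMP
	 * instructions when it finds only one CPU.
	 */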
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
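	/*
	 * Table of instructions patched at boot by the phys-to-virt
	 * patching code (CONFIG_ARM_PATCH_PHYS_VIRT) once the actual
	 * physical RAM offset is known.
	 */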
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
#ifndef CONFIG_XIP_KERNEL
		INIT_DATA
#endif
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
#ifndef CONFIG_XIP_KERNEL
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}
#endif

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

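	/*
	 * For an XIP kernel the writable data keeps its load address in
	 * flash (__data_loc) and is copied to RAM at PAGE_OFFSET +
	 * TEXT_OFFSET early during boot; otherwise the data simply
	 * follows the init sections in RAM.
	 */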
#ifdef CONFIG_XIP_KERNEL
	__data_loc = ALIGN(4);	/* location in binary */
	. = PAGE_OFFSET + TEXT_OFFSET;
#else
	__init_end = .;
	. = ALIGN(THREAD_SIZE);
	__data_loc = .;
#endif

	.data : AT(__data_loc) {
		_data = .;	/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(PAGE_SIZE);
		__init_begin = .;
		INIT_DATA
		ARM_EXIT_KEEP(EXIT_DATA)
		. = ALIGN(PAGE_SIZE);
		__init_end = .;
#endif

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
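	/*
	 * ITCM/DTCM are small, fast on-chip Tightly Coupled Memories:
	 * the ITCM is used for code and the DTCM for data.
	 */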
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
	.comment 0 : { *(.comment) }
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
 * The HYP init code can't be more than a page long.
 * The above comment applies as well.
 */
ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

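/*
 * In this later revision of the script the execute-in-place (XIP)
 * layout has been split out into vmlinux-xip.lds.S; everything below
 * the #else applies only to RAM-resident kernels.
 */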
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;

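/*
 * .hyp.text holds the code that the kernel runs in the ARM HYP
 * (hypervisor) mode, e.g. for KVM; __hyp_text_start/__hyp_text_end
 * bracket it so it can be mapped into the HYP address space.
 */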
#define HYPERVISOR_TEXT \
	VMLINUX_SYMBOL(__hyp_text_start) = .; \
	*(.hyp.text) \
	VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

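	/*
	 * With CONFIG_DEBUG_RODATA the text/rodata/data boundaries are
	 * aligned to the section-mapping size (1 << SECTION_SHIFT: 1 MiB
	 * with the classic page tables, 2 MiB with LPAE) so that the
	 * kernel can apply different permissions per section mapping.
	 */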
#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {	/* Real text segment */
		_stext = .;	/* Text and read-only data */
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		HYPERVISOR_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)		/* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;	/* End of text and rodata section */

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

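	/*
	 * vector_fiq_offset is the offset of the FIQ vector within the
	 * copied vectors page; the FIQ code uses it when installing a
	 * custom FIQ handler at run time.
	 */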
	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .;	/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif	/* CONFIG_XIP_KERNEL */