/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

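/*
 * PROC_INFO collects the proc_info_list records emitted by the per-CPU
 * support code (.proc.info.init); the early boot code scans the region
 * between __proc_info_begin and __proc_info_end for the entry matching
 * the running CPU.
 */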
#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;

#define HYPERVISOR_TEXT \
	VMLINUX_SYMBOL(__hyp_text_start) = .; \
	*(.hyp.text) \
	VMLINUX_SYMBOL(__hyp_text_end) = .;

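/*
 * IDMAP_TEXT groups code that runs with the MMU off or while it is being
 * turned on/off and therefore must be identity-mapped (.idmap.text), plus
 * the KVM HYP-mode init code (.hyp.idmap.text), which the ASSERT at the
 * bottom of this file requires to fit within a single page.
 */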
#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

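/*
 * With CPU hotplug, the per-CPU bring-up/tear-down code and the
 * proc_info records may be needed again after boot, so they must be
 * kept; otherwise they can be discarded.
 */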
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

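/*
 * Exit text is normally discarded, but it has to be kept when other
 * tables may still reference it (the SMP_ON_UP instruction fixups and
 * the GENERIC_BUG bug table can both point into exit sections);
 * discarding it in those configurations would leave dangling references
 * at link time.
 */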
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

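/*
 * jiffies is an alias for the least-significant 32 bits of the 64-bit
 * jiffies_64 counter: offset 0 on little-endian builds, offset 4 on
 * big-endian (__ARMEB__) builds.
 */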
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

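	/*
	 * The image is linked to run at PAGE_OFFSET (the start of the
	 * kernel's virtual mapping of RAM) plus TEXT_OFFSET, the fixed
	 * offset from the base of RAM at which the kernel is placed
	 * (typically 0x8000, leaving room below the image for the
	 * initial page tables).
	 */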
	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment */
		_stext = .;		/* Text and read-only data */
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		HYPERVISOR_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

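	/*
	 * The exception table pairs faulting instruction addresses with
	 * their fixup handlers (e.g. for user-space accesses). It is only
	 * meaningful with an MMU; without one the entries were already
	 * thrown away in /DISCARD/ above.
	 */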
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;			/* End of text and rodata section */

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
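	/*
	 * At boot the trap-init code copies .vectors and .stubs into the
	 * vector page (normally mapped at 0xffff0000), so only offsets
	 * within these sections matter; vector_fiq_offset lets the FIQ
	 * code locate the FIQ vector inside that copy.
	 */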

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);
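	/*
	 * __data_loc / _edata_loc record where .data is loaded. In this
	 * (non-XIP) layout they coincide with the run-time addresses, so
	 * no copy is needed at boot; the symbols mirror the XIP variant
	 * of this script, where .data has to be copied from flash to RAM.
	 */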

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and the TCM contents have
	 * been copied to their destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM: put the VMA at the TCM address
	 * and the LMA in ordinary RAM. The contents are uploaded from
	 * RAM to TCM at boot and the RAM copy is freed after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer; this is needed to create the
	 * relative __dtcm_start below (used as an extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The comment above about old binutils applies here as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */
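
/*
 * What follows is a later revision of the same linker script: the section
 * boilerplate shared with the XIP variant has moved into the local
 * vmlinux.lds.h (the ARM_* macros used below), and the CONFIG_DEBUG_RODATA
 * checks have been replaced by CONFIG_STRICT_KERNEL_RWX.
 */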
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

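/*
 * The ARM_* helper macros used throughout this script (ARM_DISCARD,
 * ARM_TEXT, ARM_UNWIND_SECTIONS, ARM_VECTORS, ARM_TCM, ...) are defined
 * in the local vmlinux.lds.h so they can be shared with vmlinux-xip.lds.S.
 */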
#include "vmlinux.lds.h"

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
	}

	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment */
		_stext = .;		/* Text and read-only data */
		ARM_TEXT
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		ARM_MMU_KEEP(*(__ex_table))
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	ARM_UNWIND_SECTIONS
#endif

	NOTES

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

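	/*
	 * ARM_VECTORS emits the .vectors and .stubs sections: linked at
	 * 0xffff0000 (and 4 KiB above it) but loaded inside the init
	 * region, from where they are copied into the vector page early
	 * in boot, just as in the older layout above.
	 */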
	ARM_VECTORS
	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}

	INIT_DATA_SECTION(16)

	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
	ARM_TCM
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;

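	/*
	 * RW_DATA_SECTION(cacheline, pagealigned, inittask) from
	 * asm-generic/vmlinux.lds.h expands to the init task stack and the
	 * page-aligned, cacheline-aligned, read-mostly and ordinary data,
	 * replacing the open-coded .data section of the older layout above.
	 */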
	_sdata = .;
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The comment above about old binutils applies here as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */