/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

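/*
 * PROC_INFO collects the per-CPU proc_info_list entries emitted by the
 * processor support files (proc-*.S) into one table; head.S walks it in
 * __lookup_processor_type to find the entry matching the booting CPU.
 */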
#define PROC_INFO \
	. = ALIGN(4); \
	VMLINUX_SYMBOL(__proc_info_begin) = .; \
	*(.proc.info.init) \
	VMLINUX_SYMBOL(__proc_info_end) = .;

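/*
 * .hyp.text holds the code that runs in the hypervisor (HYP) exception
 * mode for KVM; the __hyp_text_start/end symbols let it be mapped at HYP
 * during KVM initialisation.
 */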
#define HYPERVISOR_TEXT \
	VMLINUX_SYMBOL(__hyp_text_start) = .; \
	*(.hyp.text) \
	VMLINUX_SYMBOL(__hyp_text_end) = .;

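/*
 * Identity-mapped code: these sections are executed while the MMU is being
 * turned on or off, so they must be covered by the identity mapping. The
 * HYP idmap part is page-aligned because it has to fit within a single
 * page (see the ASSERT at the end of this script).
 */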
#define IDMAP_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__idmap_text_start) = .; \
	*(.idmap.text) \
	VMLINUX_SYMBOL(__idmap_text_end) = .; \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
	*(.hyp.idmap.text) \
	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

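/*
 * ARM_CPU_KEEP/ARM_CPU_DISCARD: with CPU hotplug the processor info tables
 * and CPU teardown code may be needed again after boot (when a CPU is
 * brought back online), so they are kept in the kernel image; without
 * hotplug they can live in init memory or be discarded.
 */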
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
#else
#define ARM_CPU_DISCARD(x) x
#define ARM_CPU_KEEP(x)
#endif

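/*
 * EXIT_TEXT/EXIT_DATA are normally discarded at link time, since the
 * kernel image itself is never unloaded. They have to be kept when other
 * tables that are not discarded (the SMP_ON_UP patch table or the generic
 * BUG table) contain references into the exit sections.
 */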
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x) x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

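/*
 * jiffies is an alias for the low 32 bits of jiffies_64: the first word on
 * little-endian, the word at offset 4 on big-endian.
 */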
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

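	/*
	 * With CONFIG_DEBUG_RODATA the start of the text is aligned to a
	 * section boundary so that kernel mapping permissions (RO/NX) can be
	 * applied at section granularity in the page tables.
	 */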
#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {	/* Real text segment */
		_stext = .;	/* Text and read-only data */
		IDMAP_TEXT
		__exception_text_start = .;
		*(.exception.text)
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		HYPERVISOR_TEXT
		KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)	/* Global offset table */
		ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	RO_DATA(PAGE_SIZE)

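	/*
	 * Exception fixup table: pairs of (faulting insn, fixup) used by the
	 * user-access fault handlers. Only meaningful with an MMU; without
	 * one the table is discarded above.
	 */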
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

	_etext = .;	/* End of text and rodata section */

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

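	/*
	 * Export the offset of the FIQ vector within the vectors page; the
	 * FIQ code (set_fiq_handler) uses it to patch the vector in the
	 * high-vectors mapping.
	 */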
	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
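	/*
	 * .alt.smp.init records SMP-only instructions together with their UP
	 * replacements; when an SMP kernel boots on a uniprocessor the
	 * instructions are patched in place and the table is freed with the
	 * rest of init memory.
	 */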
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
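	/*
	 * .pv_table lists the instructions that encode the physical-to-virtual
	 * offset; with CONFIG_ARM_PATCH_PHYS_VIRT they are rewritten at early
	 * boot once the actual PHYS_OFFSET is known.
	 */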
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	__init_end = .;
	__data_loc = .;

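	/*
	 * __data_loc is the load address (LMA) of .data, which may differ
	 * from its runtime address; _edata_loc below marks the end of the
	 * data as stored in the image.
	 */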
	.data : AT(__data_loc) {
		_data = .;	/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <linux/pgtable.h>
#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/mpu.h>

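/*
 * Note: the ARM_* layout macros used below (ARM_DISCARD, ARM_TEXT,
 * ARM_UNWIND_SECTIONS, ARM_VECTORS, ARM_TCM, ARM_DETAILS, ARM_ASSERTS, ...)
 * are defined in <asm/vmlinux.lds.h> and shared with the XIP variant of
 * this script.
 */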
OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
#ifndef CONFIG_ARM_UNWIND
		*(.ARM.exidx) *(.ARM.exidx.*)
		*(.ARM.extab) *(.ARM.extab.*)
#endif
	}

	. = KERNEL_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#endif

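	/*
	 * With the MPU (PMSAv8) the kernel image boundaries are aligned to
	 * PMSAv8_MINALIGN so that protection regions can be programmed to
	 * cover the image exactly.
	 */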
#ifdef CONFIG_ARM_MPU
	. = ALIGN(PMSAv8_MINALIGN);
#endif
	.text : {	/* Real text segment */
		_stext = .;	/* Text and read-only data */
		ARM_TEXT
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	_etext = .;	/* End of text section */

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		ARM_MMU_KEEP(KEEP(*(__ex_table)))
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	ARM_UNWIND_SECTIONS
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

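	/*
	 * ARM_VECTORS emits the exception vectors and stubs. They are
	 * relocatable: only the offsets within the sections matter, as the
	 * code is copied into the vectors page at boot.
	 */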
	ARM_VECTORS
	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		KEEP(*(.arch.info.init))
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		KEEP(*(.pv_table))
		__pv_table_end = .;
	}

	INIT_DATA_SECTION(16)

	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
	ARM_TCM
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_ALIGN);
#endif
	__init_end = .;

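	/*
	 * RW_DATA(cacheline, pagealigned, inittask) collects all writable
	 * data: the init task stack, page-aligned and cacheline-aligned data,
	 * read-mostly data and the plain .data input sections.
	 */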
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
	_edata = .;

	BSS_SECTION(0, 0, 0)
#ifdef CONFIG_ARM_MPU
	. = ALIGN(PMSAv8_MINALIGN);
#endif
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	ARM_DETAILS

	ARM_ASSERTS
}

#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
#endif

#endif /* CONFIG_XIP_KERNEL */