/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define PROC_INFO                                               \
        . = ALIGN(4);                                           \
        VMLINUX_SYMBOL(__proc_info_begin) = .;                  \
        *(.proc.info.init)                                      \
        VMLINUX_SYMBOL(__proc_info_end) = .;

#define HYPERVISOR_TEXT                                         \
        VMLINUX_SYMBOL(__hyp_text_start) = .;                   \
        *(.hyp.text)                                            \
        VMLINUX_SYMBOL(__hyp_text_end) = .;

#define IDMAP_TEXT                                              \
        ALIGN_FUNCTION();                                       \
        VMLINUX_SYMBOL(__idmap_text_start) = .;                 \
        *(.idmap.text)                                          \
        VMLINUX_SYMBOL(__idmap_text_end) = .;                   \
        . = ALIGN(PAGE_SIZE);                                   \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;             \
        *(.hyp.idmap.text)                                      \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
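/*
 * The ALIGN(PAGE_SIZE) above starts the HYP idmap text on a page boundary;
 * together with the ASSERT at the end of this script it keeps that code
 * within a single page.
 */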

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)         x
#else
#define ARM_CPU_DISCARD(x)      x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
        defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)     x
#endif
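/*
 * Exit text/data cannot always be discarded: with SMP_ON_UP fixups or
 * CONFIG_GENERIC_BUG, tables elsewhere in the image may still reference
 * exit-time code, so EXIT_TEXT/EXIT_DATA are kept in those configurations.
 */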

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
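/*
 * jiffies aliases the least significant 32 bits of the 64-bit jiffies_64
 * counter.  On little-endian ARM that low word is at offset 0; on
 * big-endian (__ARMEB__) it sits 4 bytes in, hence the "+ 4".
 */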

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name. There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                *(.ARM.exidx.exit.text)
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
#ifndef CONFIG_MMU
                *(.text.fixup)
                *(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
                *(.discard)
                *(.discard.*)
        }

        . = PAGE_OFFSET + TEXT_OFFSET;
        .head.text : {
                _text = .;
                HEAD_TEXT
        }

#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif

        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                IDMAP_TEXT
                __exception_text_start = .;
                *(.exception.text)
                __exception_text_end = .;
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                HYPERVISOR_TEXT
                KPROBES_TEXT
                *(.gnu.warning)
                *(.glue_7)
                *(.glue_7t)
                . = ALIGN(4);
                *(.got)                 /* Global offset table          */
                ARM_CPU_KEEP(PROC_INFO)
        }

#ifdef CONFIG_DEBUG_ALIGN_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif
        RO_DATA(PAGE_SIZE)

        . = ALIGN(4);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
#ifdef CONFIG_MMU
                *(__ex_table)
#endif
                __stop___ex_table = .;
        }

#ifdef CONFIG_ARM_UNWIND
        /*
         * Stack unwinding tables
         */
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
                *(.ARM.exidx*)
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
                *(.ARM.extab*)
                __stop_unwind_tab = .;
        }
#endif

        NOTES

        _etext = .;                     /* End of text and rodata section */

#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(PAGE_SIZE);
#endif
        __init_begin = .;

        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
         */
        __vectors_start = .;
        .vectors 0xffff0000 : AT(__vectors_start) {
                *(.vectors)
        }
        . = __vectors_start + SIZEOF(.vectors);
        __vectors_end = .;

        __stubs_start = .;
        .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
                *(.stubs)
        }
        . = __stubs_start + SIZEOF(.stubs);
        __stubs_end = .;

        PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
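        /*
         * The vectors are given a VMA of 0xffff0000 (the ARM high-vectors
         * address) but are loaded at __vectors_start inside the init area,
         * with the stubs placed 0x1000 above the vectors' VMA.  Only the
         * relative offsets matter, since the code is copied into place
         * during early boot.
         */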

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }
        .init.data : {
                INIT_DATA
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(THREAD_SIZE);
#endif
        __init_end = .;
        __data_loc = .;

        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
                _sdata = .;

                /*
                 * first, the init task union, aligned
                 * to an 8192 byte boundary.
                 */
                INIT_TASK_DATA(THREAD_SIZE)

                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)

                /*
                 * and the usual data section
                 */
                DATA_DATA
                CONSTRUCTORS

                _edata = .;
        }
        _edata_loc = __data_loc + SIZEOF(.data);
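        /*
         * __data_loc is the load address (LMA) of .data and _edata_loc its
         * end.  In this non-XIP link they coincide with the virtual
         * addresses; the distinction matters for the XIP variant, where
         * .data has to be copied out of ROM into RAM.
         */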

#ifdef CONFIG_HAVE_TCM
        /*
         * We align everything to a page boundary so we can
         * free it after init has commenced and the TCM contents have
         * been copied to their destination.
         */
        .tcm_start : {
                . = ALIGN(PAGE_SIZE);
                __tcm_start = .;
                __itcm_start = .;
        }

        /*
         * Link these into the ITCM RAM: put the VMA at the TCM address
         * and the LMA in common RAM, then upload the contents from RAM
         * to TCM and free the used RAM afterwards.
         */
        .text_itcm ITCM_OFFSET : AT(__itcm_start)
        {
                __sitcm_text = .;
                *(.tcm.text)
                *(.tcm.rodata)
                . = ALIGN(4);
                __eitcm_text = .;
        }

        /*
         * Reset the dot pointer; this is needed to create the
         * relative __dtcm_start below (used as an extern in code).
         */
        . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

        .dtcm_start : {
                __dtcm_start = .;
        }

        /* TODO: add the remainder of ITCM as well; it can be used for data! */
        .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
        {
                . = ALIGN(4);
                __sdtcm_data = .;
                *(.tcm.data)
                . = ALIGN(4);
                __edtcm_data = .;
        }

        /* Reset the dot pointer or the linker gets confused */
        . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

        /* End marker for freeing the TCM copy in the linked object */
        .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
                . = ALIGN(PAGE_SIZE);
                __tcm_end = .;
        }
#endif

        BSS_SECTION(0, 0, 0)
        _end = .;

        STABS_DEBUG
}

#ifdef CONFIG_DEBUG_RODATA
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty.
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The comment above about old binutils applies to this assert as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
        "HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <linux/pgtable.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>

#include "vmlinux.lds.h"

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name. There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
        }

        . = PAGE_OFFSET + TEXT_OFFSET;
        .head.text : {
                _text = .;
                HEAD_TEXT
        }

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#endif

#ifdef CONFIG_ARM_MPU
        . = ALIGN(PMSAv8_MINALIGN);
#endif
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                ARM_TEXT
        }

#ifdef CONFIG_DEBUG_ALIGN_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif
        _etext = .;                     /* End of text section */

        RO_DATA(PAGE_SIZE)

        . = ALIGN(4);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
                ARM_MMU_KEEP(*(__ex_table))
                __stop___ex_table = .;
        }
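        /*
         * ARM_MMU_KEEP() expands to its argument only when CONFIG_MMU is
         * set; on !MMU builds the exception table entries are discarded,
         * replacing the explicit #ifdef CONFIG_MMU used by the older
         * version of this script.
         */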

#ifdef CONFIG_ARM_UNWIND
        ARM_UNWIND_SECTIONS
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(PAGE_SIZE);
#endif
        __init_begin = .;

        ARM_VECTORS
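        /*
         * ARM_VECTORS places the exception vectors and stubs with their VMA
         * in the high-vectors region while loading them inside the init
         * area, roughly the layout that older versions of this script
         * open-coded with the .vectors and .stubs output sections.
         */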
        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }

        INIT_DATA_SECTION(16)

        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
        ARM_TCM
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(THREAD_SIZE);
#endif
        __init_end = .;

        _sdata = .;
        RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
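        /*
         * RW_DATA() bundles the writable data pieces that the older script
         * spelled out by hand (init task stack, cacheline-aligned and
         * read-mostly data, the regular .data payload and constructors),
         * using the given cacheline, page and THREAD_SIZE alignments.
         */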
        _edata = .;

        BSS_SECTION(0, 0, 0)
#ifdef CONFIG_ARM_MPU
        . = ALIGN(PMSAv8_MINALIGN);
#endif
        _end = .;

        STABS_DEBUG
}

#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif

/*
 * These must never be empty.
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well).
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

#endif /* CONFIG_XIP_KERNEL */