/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x) PROVIDE(__unused__##x)
#else
#define PROVIDE32(x) PROVIDE(x)
#endif
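/*
 * PROVIDE32() is used below for the legacy etext/edata/end symbols:
 * 32-bit builds still provide them, while 64-bit renames them to
 * __unused__##x so the old names are never defined there.
 */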

#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
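/*
 * These three knobs are consumed by <asm-generic/vmlinux.lds.h>, included
 * below: .bss.prominit is emitted first in .bss, the note sections are
 * routed into the PT_NOTE segment declared in PHDRS, and the exception
 * table is placed inside RO_DATA with no extra alignment.
 */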

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)
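/*
 * STRICT_ALIGN_SIZE is used below to align the __init_begin boundary;
 * CONFIG_DATA_SHIFT is normally PAGE_SHIFT, but can be raised so that the
 * read-only text/rodata region ends on a large-page boundary when
 * STRICT_KERNEL_RWX is enabled.
 */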

ENTRY(_stext)

PHDRS {
        text PT_LOAD FLAGS(7); /* RWX */
        note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
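/*
 * jiffies aliases the 64-bit jiffies_64 counter: directly on 64-bit, and
 * on 32-bit (big-endian) it overlays the low word, 4 bytes in.
 */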
SECTIONS
{
        . = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

        _text = .;
        _stext = .;

        /*
         * Head text.
         * This needs to be in its own output section to avoid ld placing
         * branch trampoline stubs randomly throughout the fixed sections,
         * which it will do (even if the branch comes from another section)
         * in order to optimize stub generation.
         */
        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
                KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
#else
                KEEP(*(.head.text.real_vectors));
                *(.head.text.real_trampolines);
                KEEP(*(.head.text.virt_vectors));
                *(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
                KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
                HEAD_TEXT
#endif
        } :text
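        /*
         * Note: the output sections below use AT(ADDR(section) - LOAD_OFFSET),
         * which makes each section's load address (LMA) its link-time virtual
         * address minus LOAD_OFFSET (from <asm/page.h>), so the image is
         * loaded at its physical address while being linked at KERNELBASE.
         */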

        __head_end = .;

#ifdef CONFIG_PPC64
        /*
         * ALIGN(0) overrides the default output section alignment because
         * this needs to start right after .head.text in order for fixed
         * section placement to work.
         */
        .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
                KEEP(*(.linker_stub_catch));
                . = . ;
#endif

#else
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                ALIGN_FUNCTION();
#endif
                /* careful! __ftr_alt_* sections need to be close to .text */
                *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
                *(.tramp.ftrace.text);
#endif
                NOINSTR_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                /*
                 * -Os builds call FP save/restore functions. The powerpc64
                 * linker generates those on demand in the .sfpr section.
                 * .sfpr gets placed at the beginning of a group of input
                 * sections, which can break start-of-text offset if it is
                 * included with the main text sections, so put it by itself.
                 */
                *(.sfpr);
                MEM_KEEP(init.text)
                MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
                *(.got1)
                __got2_start = .;
                *(.got2)
                __got2_end = .;
#endif /* CONFIG_PPC32 */

        } :text

        . = ALIGN(PAGE_SIZE);
        _etext = .;
        PROVIDE32 (etext = .);

        /* Read-only data */
        RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
        . = ALIGN(8);
        __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_entry_barrier_fixup = .;
                *(__stf_entry_barrier_fixup)
                __stop___stf_entry_barrier_fixup = .;
        }

        . = ALIGN(8);
        __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_exit_barrier_fixup = .;
                *(__stf_exit_barrier_fixup)
                __stop___stf_exit_barrier_fixup = .;
        }

        . = ALIGN(8);
        __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
                __start___rfi_flush_fixup = .;
                *(__rfi_flush_fixup)
                __stop___rfi_flush_fixup = .;
        }
#endif /* CONFIG_PPC64 */
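        /*
         * Each __start_... / __stop_... pair in these fixup sections
         * brackets a table of code locations that is patched at boot,
         * depending on which mitigations the platform needs.
         */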

#ifdef CONFIG_PPC_BARRIER_NOSPEC
        . = ALIGN(8);
        __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
                __start___barrier_nospec_fixup = .;
                *(__barrier_nospec_fixup)
                __stop___barrier_nospec_fixup = .;
        }
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
        . = ALIGN(8);
        __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
                __start__btb_flush_fixup = .;
                *(__btb_flush_fixup)
                __stop__btb_flush_fixup = .;
        }
#endif

/*
 * Init sections discarded at runtime
 */
        . = ALIGN(STRICT_ALIGN_SIZE);
        __init_begin = .;
        . = ALIGN(PAGE_SIZE);
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
#ifdef CONFIG_PPC64
                *(.tramp.ftrace.init);
#endif
        } :text

        /* .exit.text is discarded at runtime, not link time,
         * to deal with references from __bug_table
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
        }

        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                INIT_SETUP(16)
        }

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                INIT_CALLS
        }

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                CON_INITCALL
        }

        . = ALIGN(8);
        __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
                __start___ftr_fixup = .;
                KEEP(*(__ftr_fixup))
                __stop___ftr_fixup = .;
        }
        . = ALIGN(8);
        __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
                __start___mmu_ftr_fixup = .;
                KEEP(*(__mmu_ftr_fixup))
                __stop___mmu_ftr_fixup = .;
        }
        . = ALIGN(8);
        __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
                __start___lwsync_fixup = .;
                KEEP(*(__lwsync_fixup))
                __stop___lwsync_fixup = .;
        }
#ifdef CONFIG_PPC64
        . = ALIGN(8);
        __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
                __start___fw_ftr_fixup = .;
                KEEP(*(__fw_ftr_fixup))
                __stop___fw_ftr_fixup = .;
        }
#endif
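        /*
         * The CPU, MMU, lwsync and firmware feature fixup tables above are
         * applied once during early boot; they sit between __init_begin and
         * __init_end, so their memory is freed with the rest of init.
         */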
        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
                INIT_RAM_FS
        }

        PERCPU_SECTION(L1_CACHE_BYTES)

        . = ALIGN(8);
        .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
                __machine_desc_start = . ;
                KEEP(*(.machine.desc))
                __machine_desc_end = . ;
        }
#ifdef CONFIG_RELOCATABLE
        . = ALIGN(8);
        .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
        {
#ifdef CONFIG_PPC32
                __dynamic_symtab = .;
#endif
                *(.dynsym)
        }
        .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
        .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
        {
                __dynamic_start = .;
                *(.dynamic)
        }
        .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
        .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
        .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
        .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
        {
                __rela_dyn_start = .;
                *(.rela*)
        }
#endif
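        /*
         * For CONFIG_RELOCATABLE the dynamic sections are kept in the image
         * so the early boot code can walk .rela.dyn (starting at
         * __rela_dyn_start) and relocate the kernel to wherever it was
         * actually loaded.
         */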
        /* .exit.data is discarded at runtime, not link time,
         * to deal with references from .exit.text
         */
        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

        /* freed after init ends here */
        . = ALIGN(PAGE_SIZE);
        __init_end = .;

/*
 * And now the various read/write data
 */

        . = ALIGN(PAGE_SIZE);
        _sdata = .;

#ifdef CONFIG_PPC32
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
#ifdef CONFIG_UBSAN
                *(.data..Lubsan_data*)
                *(.data..Lubsan_type*)
#endif
                *(.data.rel*)
                *(SDATA_MAIN)
                *(.sdata2)
                *(.got.plt) *(.got)
                *(.plt)
                *(.branch_lt)
        }
#else
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                *(.data.rel*)
                *(.toc1)
                *(.branch_lt)
        }

        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
                __start_opd = .;
                KEEP(*(.opd))
                __end_opd = .;
        }

        . = ALIGN(256);
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                __toc_start = .;
#ifndef CONFIG_RELOCATABLE
                __prom_init_toc_start = .;
                arch/powerpc/kernel/prom_init.o*(.toc .got)
                __prom_init_toc_end = .;
#endif
                *(.got)
                *(.toc)
        }
#endif
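        /*
         * __toc_start above marks the base of the 64-bit TOC; the kernel's
         * TOC pointer (r2) is conventionally set 0x8000 bytes into it so
         * the full signed 16-bit displacement range is usable.
         */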

        /* The initial task and kernel stack */
        INIT_TASK_DATA_SECTION(THREAD_ALIGN)

        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
        }

        .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
        }

        .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
                READ_MOSTLY_DATA(L1_CACHE_BYTES)
        }

        . = ALIGN(PAGE_SIZE);
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }

        BUG_TABLE

        . = ALIGN(PAGE_SIZE);
        _edata = .;
        PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

        BSS_SECTION(0, 0, 0)
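        /*
         * The three arguments above are the sbss/bss/stop alignments of the
         * generic BSS_SECTION() macro; BSS_FIRST_SECTIONS at the top of
         * this file makes .bss.prominit the first input section of .bss.
         */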

        . = ALIGN(PAGE_SIZE);
        _end = . ;
        PROVIDE32 (end = .);

        STABS_DEBUG

        DWARF_DEBUG

        DISCARDS
        /DISCARD/ : {
                *(*.EMB.apuinfo)
                *(.glink .iplt .plt .rela* .comment)
                *(.gnu.version*)
                *(.gnu.attributes)
                *(.eh_frame)
        }
}
#ifdef CONFIG_PPC64
#define PROVIDE32(x) PROVIDE(__unused__##x)
#else
#define PROVIDE32(x) PROVIDE(x)
#endif
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

ENTRY(_stext)

PHDRS {
        kernel PT_LOAD FLAGS(7); /* RWX */
        notes PT_NOTE FLAGS(0);
        dummy PT_NOTE FLAGS(0);

        /* binutils < 2.18 has a bug that makes it misbehave when taking an
           ELF file with all segments at load address 0 as input. This
           happens when running "strip" on vmlinux, because of the AT() magic
           in this linker script. People using GCC >= 4.2 won't run into
           this problem, because the "build-id" support will put some data
           into the "notes" segment (at a non-zero load address).

           To work around this, we force some data into both the "dummy"
           segment and the kernel segment, so the dummy segment will get a
           non-zero load address. It's not enough to always create the
           "notes" segment, since if nothing gets assigned to it, its load
           address will be zero. */
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
        . = 0;
        reloc_start = .;

        . = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

        /* Text and gots */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                ALIGN_FUNCTION();
                HEAD_TEXT
                _text = .;
                /* careful! __ftr_alt_* sections need to be close to .text */
                *(.text .fixup __ftr_alt_* .ref.text)
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT

#ifdef CONFIG_PPC32
                *(.got1)
                __got2_start = .;
                *(.got2)
                __got2_end = .;
#endif /* CONFIG_PPC32 */

        } :kernel

        . = ALIGN(PAGE_SIZE);
        _etext = .;
        PROVIDE32 (etext = .);

        /* Read-only data */
        RODATA

        EXCEPTION_TABLE(0)

        NOTES :kernel :notes
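        /*
         * Note: the notes also land in the "notes" PT_NOTE segment here,
         * which is what gives that segment a non-zero load address when a
         * build-id note is present (see the workaround comment in PHDRS).
         */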

        /* The dummy segment contents for the bug workaround mentioned above
           near PHDRS. */
        .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
                LONG(0)
                LONG(0)
                LONG(0)
        } :kernel :dummy

/*
 * Init sections discarded at runtime
 */
        . = ALIGN(PAGE_SIZE);
        __init_begin = .;
        INIT_TEXT_SECTION(PAGE_SIZE) :kernel

        /* .exit.text is discarded at runtime, not link time,
         * to deal with references from __bug_table
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
                __vtop_table_begin = .;
                *(.vtop_fixup);
                __vtop_table_end = .;
                __ptov_table_begin = .;
                *(.ptov_fixup);
                __ptov_table_end = .;
        }

        .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
                INIT_SETUP(16)
        }

        .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
                INIT_CALLS
        }

        .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
                CON_INITCALL
        }

        SECURITY_INIT

        . = ALIGN(8);
        __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
                __start___ftr_fixup = .;
                *(__ftr_fixup)
                __stop___ftr_fixup = .;
        }
        . = ALIGN(8);
        __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
                __start___mmu_ftr_fixup = .;
                *(__mmu_ftr_fixup)
                __stop___mmu_ftr_fixup = .;
        }
        . = ALIGN(8);
        __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
                __start___lwsync_fixup = .;
                *(__lwsync_fixup)
                __stop___lwsync_fixup = .;
        }
#ifdef CONFIG_PPC64
        . = ALIGN(8);
        __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
                __start___fw_ftr_fixup = .;
                *(__fw_ftr_fixup)
                __stop___fw_ftr_fixup = .;
        }
#endif
        .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
                INIT_RAM_FS
        }

        PERCPU_SECTION(L1_CACHE_BYTES)

        . = ALIGN(8);
        .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
                __machine_desc_start = . ;
                *(.machine.desc)
                __machine_desc_end = . ;
        }
#ifdef CONFIG_RELOCATABLE
        . = ALIGN(8);
        .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
        {
#ifdef CONFIG_RELOCATABLE_PPC32
                __dynamic_symtab = .;
#endif
                *(.dynsym)
        }
        .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
        .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
        {
                __dynamic_start = .;
                *(.dynamic)
        }
        .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
        .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
        .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
        {
                __rela_dyn_start = .;
                *(.rela*)
        }
#endif

        /* freed after init ends here */
        . = ALIGN(PAGE_SIZE);
        __init_end = .;

/*
 * And now the various read/write data
 */

        . = ALIGN(PAGE_SIZE);
        _sdata = .;

#ifdef CONFIG_PPC32
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                *(.sdata)
                *(.got.plt) *(.got)
        }
#else
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                DATA_DATA
                *(.data.rel*)
                *(.toc1)
                *(.branch_lt)
        }

        .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
                *(.opd)
        }

        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                __toc_start = .;
                *(.got)
                *(.toc)
        }
#endif

        /* The initial task and kernel stack */
        INIT_TASK_DATA_SECTION(THREAD_SIZE)

        .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
                PAGE_ALIGNED_DATA(PAGE_SIZE)
        }

        .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
        }

        .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
                READ_MOSTLY_DATA(L1_CACHE_BYTES)
        }

        . = ALIGN(PAGE_SIZE);
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }

        . = ALIGN(PAGE_SIZE);
        _edata = .;
        PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

        BSS_SECTION(0, 0, 0)

        . = ALIGN(PAGE_SIZE);
        _end = . ;
        PROVIDE32 (end = .);

        /* Sections to be discarded. */
        DISCARDS
}