/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x) PROVIDE(__unused__##x)
#else
#define PROVIDE32(x) PROVIDE(x)
#endif
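
/*
 * PROVIDE32() emits the traditional 32-bit-only symbols (etext, edata, end)
 * only on ppc32 builds; on ppc64 the macro renames them to __unused__* so
 * the PROVIDE() statements below become harmless no-ops.
 */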

#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)

ENTRY(_stext)

PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}
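
/*
 * Output sections tagged ":text" below land in the single RWX PT_LOAD
 * segment declared above; EMITS_PT_NOTE makes the generic note sections go
 * to the separate PT_NOTE segment.
 */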

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
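
/*
 * jiffies aliases the generic 64-bit jiffies_64 counter: the 64-bit kernel
 * uses it directly, while the (big-endian) 32-bit kernel points jiffies at
 * the low word, which sits 4 bytes into jiffies_64.
 */
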
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
	HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		NOINSTR_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

	} :text

	. = ALIGN(PAGE_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC64 */
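
	/*
	 * The __start__/__stop__ pairs above bound patch tables that are
	 * rewritten at runtime when the STF barrier and RFI flush
	 * mitigations are switched on or off (see the fixup helpers in
	 * arch/powerpc/lib/feature-fixups.c, e.g. do_stf_barrier_fixups()
	 * and do_rfi_flush_fixups()).
	 */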

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
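
	/*
	 * A minimal sketch of how these feature-fixup bounds are typically
	 * consumed during early boot (the real call sites live in the
	 * powerpc setup code and arch/powerpc/lib/feature-fixups.c; treat
	 * the exact call shown here as illustrative):
	 *
	 *	do_feature_fixups(cur_cpu_spec->cpu_features,
	 *			  &__start___ftr_fixup, &__stop___ftr_fixup);
	 */
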
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
#ifdef CONFIG_UBSAN
		*(.data..Lubsan_data*)
		*(.data..Lubsan_type*)
#endif
		*(.data.rel*)
		*(SDATA_MAIN)
		*(.sdata2)
		*(.got.plt) *(.got)
		*(.plt)
		*(.branch_lt)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}

	. = ALIGN(256);
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
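
	/*
	 * Note: the 64-bit ABI addresses the TOC through r2, which by
	 * convention points 0x8000 bytes past __toc_start so the whole
	 * 64KB TOC is reachable with signed 16-bit displacements.
	 */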
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata = .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt .rela* .comment)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
	}
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PPC64
#define PROVIDE32(x) PROVIDE(__unused__##x)
#else
#define PROVIDE32(x) PROVIDE(x)
#endif

#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
#define RUNTIME_DISCARD_EXIT

#define SOFT_MASK_TABLE(align) \
	. = ALIGN(align); \
	__soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
		__start___soft_mask_table = .; \
		KEEP(*(__soft_mask_table)) \
		__stop___soft_mask_table = .; \
	}

#define RESTART_TABLE(align) \
	. = ALIGN(align); \
	__restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
		__start___restart_table = .; \
		KEEP(*(__restart_table)) \
		__stop___restart_table = .; \
	}
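
/*
 * SOFT_MASK_TABLE and RESTART_TABLE collect address-range entries emitted by
 * the 64-bit interrupt entry/exit code: the restart table lets the interrupt
 * return path recognise that it interrupted a restartable code region and
 * resume at a fixup address, and the soft-mask table marks regions that must
 * be treated as if interrupts were soft-masked.
 */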

#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)

#if STRICT_ALIGN_SIZE < PAGE_SIZE
#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT"
#endif
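
/*
 * STRICT_ALIGN_SIZE (from CONFIG_DATA_SHIFT) sets the alignment of the
 * text/data permission boundary; it cannot be smaller than a page because
 * permissions can only be applied at page (or larger block) granularity.
 */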

ENTRY(_stext)

PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E_64
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
	HEAD_TEXT
#endif
	} :text

	__head_end = .;

#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
		. = . ;
#endif

#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.text);
#endif
		NOINSTR_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)
	} :text

	. = ALIGN(PAGE_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC32
	.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
		*(.sdata2)
	}
#endif

	.data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
		*(.data.rel.ro .data.rel.ro.*)
	}

	.branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
		*(.branch_lt)
	}

#ifdef CONFIG_PPC32
	.got1 : AT(ADDR(.got1) - LOAD_OFFSET) {
		*(.got1)
	}
	.got2 : AT(ADDR(.got2) - LOAD_OFFSET) {
		__got2_start = .;
		*(.got2)
		__got2_end = .;
	}
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		*(.got)
		*(.got.plt)
	}
	.plt : AT(ADDR(.plt) - LOAD_OFFSET) {
		/* XXX: is .plt (and .got.plt) required? */
		*(.plt)
	}

#else /* CONFIG_PPC32 */
	.toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
		*(.toc1)
	}

	.got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
		*(.got .toc)
	}

	SOFT_MASK_TABLE(8)
	RESTART_TABLE(8)

#ifdef CONFIG_PPC64_ELF_ABI_V1
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		__start_opd = .;
		KEEP(*(.opd))
		__end_opd = .;
	}
#endif
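
	/*
	 * Under the ELFv1 ABI a function symbol refers to a function
	 * descriptor (entry address plus TOC pointer) rather than to the
	 * code itself; those descriptors live in .opd, and the
	 * __start_opd/__end_opd bounds let the kernel distinguish
	 * descriptors from ordinary text addresses.
	 */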

	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
		__start___uaccess_flush_fixup = .;
		*(__uaccess_flush_fixup)
		__stop___uaccess_flush_fixup = .;
	}

	. = ALIGN(8);
	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
		__start___entry_flush_fixup = .;
		*(__entry_flush_fixup)
		__stop___entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
		__start___scv_entry_flush_fixup = .;
		*(__scv_entry_flush_fixup)
		__stop___scv_entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC32 */
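
	/*
	 * The flush/barrier tables above bound alternative code sequences
	 * that are patched in or out at runtime as the L1D flush and entry
	 * barrier mitigations are enabled or disabled (e.g.
	 * do_uaccess_flush_fixups() and do_entry_flush_fixups() in
	 * arch/powerpc/lib/feature-fixups.c).
	 */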

#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_E500
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif

	/*
	 * Various code relies on __init_begin being at the strict RWX boundary.
	 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__srwx_boundary = .;
	__end_rodata = .;
	__init_begin = .;
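
	/*
	 * Sections below __srwx_boundary form the read-only/execute part of
	 * the image; sections from here on are the init and data parts. The
	 * STRICT_ALIGN_SIZE alignment keeps this split on a boundary the MMU
	 * can map with large pages when STRICT_KERNEL_RWX is enabled.
	 */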

/*
 * Init sections discarded at runtime
 */
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT

		/*
		 * .init.text might be RO so we must ensure this section ends on
		 * a page boundary.
		 */
		. = ALIGN(PAGE_SIZE);
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);
#endif
	} :text

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	. = ALIGN(PAGE_SIZE);

	INIT_DATA_SECTION(16)

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}
	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif

	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
#ifdef CONFIG_RELOCATABLE
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
		__dynamic_symtab = .;
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
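
	/*
	 * For a relocatable kernel these dynamic sections are kept in the
	 * image so the early relocation code can walk the R_PPC64_RELATIVE
	 * (or R_PPC_RELATIVE) entries starting at __rela_dyn_start and fix
	 * the image up for the address it was actually loaded at.
	 */
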
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
#ifdef CONFIG_PPC32
		*(SDATA_MAIN)
#endif
	}

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	BUG_TABLE

	. = ALIGN(PAGE_SIZE);
	_edata = .;
	PROVIDE32 (edata = .);

/*
 * And finally the bss
 */

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS
	/DISCARD/ : {
		*(*.EMB.apuinfo)
		*(.glink .iplt .plt)
		*(.gnu.version*)
		*(.gnu.attributes)
		*(.eh_frame)
#ifndef CONFIG_RELOCATABLE
		*(.rela*)
#endif
	}
}