// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for PPC.
    Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/code-patching.h>

/* Count how many different relocations (different symbol, different
   addend) */
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF32_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF32_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

#ifdef CONFIG_DYNAMIC_FTRACE
	_count_relocs++;	/* add one for ftrace_caller */
#endif
	return _count_relocs;
}

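/*
 * sort() comparator: order relocation entries by r_info, then by r_addend,
 * so duplicates end up adjacent and count_relocs() can detect them in a
 * single linear pass.
 */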
static int relacmp(const void *_x, const void *_y)
{
	const Elf32_Rela *x, *y;

	y = (Elf32_Rela *)_x;
	x = (Elf32_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

/* Get the potential trampoline space required for the init or
   non-init (core) sections */
static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
				  const Elf32_Shdr *sechdrs,
				  const char *secstrings,
				  int is_init)
{
	unsigned long ret = 0;
	unsigned i;

	/* Everything marked ALLOC (this includes the exported
	   symbols) */
	for (i = 1; i < hdr->e_shnum; i++) {
		/* If it's called *.init*, and we're not init, we're
		   not interested */
		if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
		    != is_init)
			continue;

		/* We don't want to look at debug sections. */
		if (strstr(secstrings + sechdrs[i].sh_name, ".debug"))
			continue;

		if (sechdrs[i].sh_type == SHT_RELA) {
			pr_debug("Found relocations in section %u\n", i);
			pr_debug("Ptr: %p. Number: %u\n",
				 (void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf32_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)hdr + sechdrs[i].sh_offset,
			     sechdrs[i].sh_size / sizeof(Elf32_Rela),
			     sizeof(Elf32_Rela), relacmp, NULL);

			ret += count_relocs((void *)hdr
					    + sechdrs[i].sh_offset,
					    sechdrs[i].sh_size
					    / sizeof(Elf32_Rela))
				* sizeof(struct ppc_plt_entry);
		}
	}

	return ret;
}

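/*
 * Runs before the module's sections are laid out: locate the (initially
 * empty) .plt and .init.plt sections and size them for the worst-case
 * number of trampolines computed from the REL24 relocations.
 */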
int module_frob_arch_sections(Elf32_Ehdr *hdr,
			      Elf32_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .plt and .init.plt sections */
	for (i = 0; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
			me->arch.init_plt_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
			me->arch.core_plt_section = i;
	}
	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
		pr_err("Module doesn't contain .plt or .init.plt sections.\n");
		return -ENOEXEC;
	}

	/* Override their sizes */
	sechdrs[me->arch.core_plt_section].sh_size
		= get_plt_size(hdr, sechdrs, secstrings, 0);
	sechdrs[me->arch.init_plt_section].sh_size
		= get_plt_size(hdr, sechdrs, secstrings, 1);
	return 0;
}

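/*
 * A PLT entry already in use is recognised by its first two instructions:
 * "lis r12,sym@ha" / "addi r12,r12,sym@l" loading the given target address.
 */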
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
	if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
		return 0;
	if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
		return 0;
	return 1;
}

/* Set up a trampoline in the PLT to bounce us to the distant function */
static uint32_t do_plt_call(void *location,
			    Elf32_Addr val,
			    const Elf32_Shdr *sechdrs,
			    struct module *mod)
{
	struct ppc_plt_entry *entry;

	pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
	/* Init, or core PLT? */
	if (location >= mod->core_layout.base
	    && location < mod->core_layout.base + mod->core_layout.size)
		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
	else
		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;

	/* Find this entry, or if that fails, the next available entry */
	while (entry->jump[0]) {
		if (entry_matches(entry, val))
			return (uint32_t)entry;
		entry++;
	}

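	/*
	 * Found a free slot: emit the four-instruction trampoline
	 *
	 *	lis	r12, sym@ha
	 *	addi	r12, r12, sym@l
	 *	mtctr	r12
	 *	bctr
	 *
	 * A zero return value tells the callers that patching failed.
	 */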
	if (patch_instruction(&entry->jump[0], ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(val)))))
		return 0;
	if (patch_instruction(&entry->jump[1], ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))))
		return 0;
	if (patch_instruction(&entry->jump[2], ppc_inst(PPC_RAW_MTCTR(_R12))))
		return 0;
	if (patch_instruction(&entry->jump[3], ppc_inst(PPC_RAW_BCTR())))
		return 0;

	pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
	return (uint32_t)entry;
}

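/*
 * Halfword (ADDR16_*) relocations are applied by rewriting the naturally
 * aligned 32-bit word that contains the target halfword, since
 * patch_instruction() operates on whole instruction words.
 */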
static int patch_location_16(uint32_t *loc, u16 value)
{
	loc = PTR_ALIGN_DOWN(loc, sizeof(u32));
	return patch_instruction(loc, ppc_inst((*loc & 0xffff0000) | value));
}

int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *module)
{
	unsigned int i;
	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;
	uint32_t value;

	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rela[i].r_info);
		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF32_R_TYPE(rela[i].r_info)) {
		case R_PPC_ADDR32:
			/* Simply set it */
			*(uint32_t *)location = value;
			break;

		case R_PPC_ADDR16_LO:
			/* Low half of the symbol */
			if (patch_location_16(location, PPC_LO(value)))
				return -EFAULT;
			break;

		case R_PPC_ADDR16_HI:
			/* Higher half of the symbol */
			if (patch_location_16(location, PPC_HI(value)))
				return -EFAULT;
			break;

		case R_PPC_ADDR16_HA:
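			/*
			 * Sign-adjusted high half of the symbol: the @ha value
			 * that pairs with a sign-extending @l (addi/lwz), i.e.
			 * PPC_HA(value) == ((value + 0x8000) >> 16) & 0xffff.
			 */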
			if (patch_location_16(location, PPC_HA(value)))
				return -EFAULT;
			break;

		case R_PPC_REL24:
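			/*
			 * A 24-bit relative branch reaches only +/- 32MB from
			 * the call site; out-of-range targets go via a PLT
			 * trampoline instead.
			 */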
			if ((int)(value - (uint32_t)location) < -0x02000000
			    || (int)(value - (uint32_t)location) >= 0x02000000) {
				value = do_plt_call(location, value,
						    sechdrs, module);
				if (!value)
					return -EFAULT;
			}

			/* Only replace bits 2 through 26 */
			pr_debug("REL24 value = %08X. location = %08X\n",
				 value, (uint32_t)location);
			pr_debug("Location before: %08X.\n",
				 *(uint32_t *)location);
			value = (*(uint32_t *)location & ~PPC_LI_MASK) |
				PPC_LI(value - (uint32_t)location);

			if (patch_instruction(location, ppc_inst(value)))
				return -EFAULT;

			pr_debug("Location after: %08X.\n",
				 *(uint32_t *)location);
267 pr_debug("ie. jump to %08X+%08X = %08X\n",
268 *(uint32_t *)PPC_LI((uint32_t)location), (uint32_t)location,
269 (*(uint32_t *)PPC_LI((uint32_t)location)) + (uint32_t)location);
			break;

		case R_PPC_REL32:
			/* 32-bit relative jump. */
			*(uint32_t *)location = value - (uint32_t)location;
			break;

		default:
			pr_err("%s: unknown ADD relocation: %u\n",
			       module->name,
			       ELF32_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE
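/*
 * Decode a PLT trampoline: read back its four instructions, check they form
 * the expected lis/addi/mtctr/bctr sequence, and recover the branch target
 * so ftrace can tell where a module trampoline ultimately jumps.
 */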
notrace int module_trampoline_target(struct module *mod, unsigned long addr,
				     unsigned long *target)
{
	ppc_inst_t jmp[4];

	/* Find where the trampoline jumps to */
	if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
		return -EFAULT;
	if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
		return -EFAULT;

	/* verify that this is what we expect it to be */
	if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
		return -EINVAL;
	if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
		return -EINVAL;
	if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
		return -EINVAL;
	if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
		return -EINVAL;

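	/*
	 * Recombine the @ha/@l immediates.  The addi immediate is
	 * sign-extended at run time, so subtract 0x10000 from the raw
	 * concatenation when bit 15 of the low half is set.
	 */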
	addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
	if (addr & 0x8000)
		addr -= 0x10000;

	*target = addr;

	return 0;
}

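/*
 * Reserve PLT trampolines for the ftrace entry points so every mcount call
 * site in the module can reach ftrace_caller (and ftrace_regs_caller when
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS is enabled), however far away they are.
 */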
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
	module->arch.tramp = do_plt_call(module->core_layout.base,
					 (unsigned long)ftrace_caller,
					 sechdrs, module);
	if (!module->arch.tramp)
		return -ENOENT;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	module->arch.tramp_regs = do_plt_call(module->core_layout.base,
					      (unsigned long)ftrace_regs_caller,
					      sechdrs, module);
	if (!module->arch.tramp_regs)
		return -ENOENT;
#endif

	return 0;
}
#endif