// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/inst.h>

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR.  */

#ifdef PPC64_ELF_ABI_v2

/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return addr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func;
}

/* PowerPC64 specific values for the Elf64_Sym st_other field. */
#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
 (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
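
/*
 * The three-bit field in st_other encodes the distance from a function's
 * global entry point to its local entry point as a power of two: values
 * 0 and 1 yield an offset of 0 bytes, 2 yields 4, 3 yields 8, and so on
 * up to 7 yielding 128, which is exactly what the shifts above compute.
 */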

static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	/* sym->st_other indicates the offset to the local entry point
	 * (otherwise the callee assumes r12 is the address of the start
	 * of the function and derives r2 from it). */
	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
}
#else

/* An address is the address of the OPD entry, which contains the address of the function. */
typedef struct ppc64_opd_entry func_desc_t;

static func_desc_t func_desc(unsigned long addr)
{
	return *(struct ppc64_opd_entry *)addr;
}
static unsigned long func_addr(unsigned long addr)
{
	return func_desc(addr).funcaddr;
}
static unsigned long stub_func_addr(func_desc_t func)
{
	return func.funcaddr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
	return 0;
}

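/*
 * On ELFv1 a function symbol points at an OPD entry rather than at code.
 * Only dereference pointers that fall inside this module's own .opd
 * section; anything else is already a real text address.
 */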
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
	if (ptr < (void *)mod->arch.start_opd ||
			ptr >= (void *)mod->arch.end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

#define STUB_MAGIC		0x73747562 /* stub */

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions). We only
	 * need 6 instructions on ABIv2 but we always allocate 7 so
	 * we don't have to modify the trampoline load instruction. */
	u32 jump[7];
	/* Used by ftrace to identify stubs */
	u32 magic;
	/* Data for the above code */
	func_desc_t funcdata;
};
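
/*
 * Layout note: jump[] occupies bytes 0-27 and magic bytes 28-31, so
 * funcdata starts at offset 32 from the start of the entry; the stub
 * code below relies on that offset when loading the target address.
 */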

/*
 * PPC64 uses 24 bit jumps, but we need to jump into other modules or
 * the kernel which may be further.  So we jump to a stub.
 *
 * For ELFv1 we need to use this to set up the new r2 value (aka TOC
 * pointer).  For ELFv2 it's the callee's responsibility to set up the
 * new r2, but for both we need to save the old r2.
 *
 * We could simply patch the new r2 value and function pointer into
 * the stub, but it's significantly shorter to put these values at the
 * end of the stub code, and patch the stub address (32-bits relative
 * to the TOC ptr, r2) into the stub.
 */
static u32 ppc64_stub_insns[] = {
	PPC_RAW_ADDIS(_R11, _R2, 0),
	PPC_RAW_ADDI(_R11, _R11, 0),
	/* Save current r2 value in magic place on the stack. */
	PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
	PPC_RAW_LD(_R12, _R11, 32),
#ifdef PPC64_ELF_ABI_v1
	/* Set up new r2 from function descriptor */
	PPC_RAW_LD(_R2, _R11, 40),
#endif
	PPC_RAW_MTCTR(_R12),
	PPC_RAW_BCTR(),
};
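
/*
 * Once create_stub() has patched in the offsets, the stub is roughly:
 *
 *	addis	r11,r2,<stub offset from r2>@ha
 *	addi	r11,r11,<stub offset from r2>@l
 *	std	r2,R2_STACK_OFFSET(r1)
 *	ld	r12,32(r11)	# entry->funcdata: the target address
 *	ld	r2,40(r11)	# ELFv1 only: new TOC from the OPD entry
 *	mtctr	r12
 *	bctr
 */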

/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			pr_debug("Found relocations in section %u\n", i);
			pr_debug("Ptr: %p.  Number: %Lu\n",
				 (void *)sechdrs[i].sh_addr,
				 sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, NULL);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* an additional one for ftrace_regs_caller */
	relocs++;
#endif
#endif

	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}

/* Still needed for ELFv2, for .TOC. */
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.') {
			memmove(vers->name, vers->name+1, strlen(vers->name));
		}
}

/*
 * Undefined symbols which refer to .funcname are hacked to funcname. Make
 * .TOC. seem to be defined (value set later).
 */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.') {
				if (strcmp(name+1, "TOC.") == 0)
					syms[i].st_shndx = SHN_ABS;
				syms[i].st_name++;
			}
		}
	}
}
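
/*
 * For example, an undefined reference to ".foo" becomes a reference to
 * "foo" simply by bumping st_name past the dot, while ".TOC." becomes an
 * absolute "TOC." symbol whose value apply_relocate_add() fills in below.
 */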

static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
			       const char *strtab,
			       unsigned int symindex)
{
	unsigned int i, numsyms;
	Elf64_Sym *syms;

	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_ABS
		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
			return &syms[i];
	}
	return NULL;
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
			me->arch.toc_section = i;
			if (sechdrs[i].sh_addralign < 8)
				sechdrs[i].sh_addralign = 8;
		}
		else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		pr_err("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means.  */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}

#ifdef CONFIG_MPROFILE_KERNEL

static u32 stub_insns[] = {
	PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
	PPC_RAW_ADDIS(_R12, _R12, 0),
	PPC_RAW_ADDI(_R12, _R12, 0),
	PPC_RAW_MTCTR(_R12),
	PPC_RAW_BCTR(),
};
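
/*
 * After create_ftrace_stub() patches in the offsets, this becomes roughly:
 *
 *	ld	r12,kernel_toc(r13)	# kernel TOC from the paca
 *	addis	r12,r12,<target - kernel TOC>@ha
 *	addi	r12,r12,<target - kernel TOC>@l
 *	mtctr	r12
 *	bctr
 */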

/*
 * For mprofile-kernel we use a special stub for ftrace_caller() because we
 * can't rely on r2 containing this module's TOC when we enter the stub.
 *
 * That can happen if the function calling us didn't need to use the TOC. In
 * that case it won't have set up r2, and the r2 value will be either the
 * kernel's TOC, or possibly another module's TOC.
 *
 * To deal with that this stub uses the kernel TOC, which is always accessible
 * via the paca (in r13). The target (ftrace_caller()) is responsible for
 * saving and restoring the TOC before returning.
 */
static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
					unsigned long addr,
					struct module *me)
{
	long reladdr;

	memcpy(entry->jump, stub_insns, sizeof(stub_insns));

	/* Stub uses address relative to kernel TOC (from the paca) */
	reladdr = addr - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address of %ps out of range of kernel_toc.\n",
							me->name, (void *)addr);
		return 0;
	}

	entry->jump[1] |= PPC_HA(reladdr);
	entry->jump[2] |= PPC_LO(reladdr);

	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
	entry->funcdata = func_desc(addr);
	entry->magic = STUB_MAGIC;

	return 1;
}

static bool is_mprofile_ftrace_call(const char *name)
{
	if (!strcmp("_mcount", name))
		return true;
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!strcmp("ftrace_caller", name))
		return true;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!strcmp("ftrace_regs_caller", name))
		return true;
#endif
#endif

	return false;
}
#else
static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
					unsigned long addr,
					struct module *me)
{
	return 0;
}

static bool is_mprofile_ftrace_call(const char *name)
{
	return false;
}
#endif

/*
 * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives
 * signed 16-bit offsets the maximum span over the TOC). Round down to a
 * 256 byte boundary for the odd case where we are setting up r2 without a
 * .toc section.
 */
static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
{
	return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
}

/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(const Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      unsigned long addr,
			      struct module *me,
			      const char *name)
{
	long reladdr;

	if (is_mprofile_ftrace_call(name))
		return create_ftrace_stub(entry, addr, me);

	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
		return 0;
	}
	pr_debug("Stub %p gets data from reladdr %li\n", entry, reladdr);

	entry->jump[0] |= PPC_HA(reladdr);
	entry->jump[1] |= PPC_LO(reladdr);
	entry->funcdata = func_desc(addr);
	entry->magic = STUB_MAGIC;

	return 1;
}

/* Create stub to jump to function described in this OPD/ptr: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
				   unsigned long addr,
				   struct module *me,
				   const char *name)
{
	struct ppc64_stub_entry *stubs;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
		if (WARN_ON(i >= num_stubs))
			return 0;

		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], addr, me, name))
		return 0;

	return (unsigned long)&stubs[i];
}

/* We expect a nop next: if it is, replace it with an instruction to
   restore r2. */
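/*
 * A call that goes via a stub is compiled as, for example:
 *	bl	target
 *	nop
 * and after relocation ends up as:
 *	bl	<stub for target>
 *	ld	r2,R2_STACK_OFFSET(r1)
 */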
static int restore_r2(const char *name, u32 *instruction, struct module *me)
{
	u32 *prev_insn = instruction - 1;

	if (is_mprofile_ftrace_call(name))
		return 1;

	/*
	 * Make sure the branch isn't a sibling call.  Sibling calls aren't
	 * "link" branches and they don't return, so they don't need the r2
	 * restore afterwards.
	 */
	if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
		return 1;

	if (*instruction != PPC_RAW_NOP()) {
		pr_err("%s: Expected nop after call, got %08x at %pS\n",
			me->name, *instruction, instruction);
		return 0;
	}
	/* ld r2,R2_STACK_OFFSET(r1) */
	*instruction = PPC_INST_LD_TOC;
	return 1;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	pr_debug("Applying ADD relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* First time we're called, we can fix up .TOC. */
	if (!me->arch.toc_fixed) {
		sym = find_dot_toc(sechdrs, strtab, symindex);
		/* It's theoretically possible that a module doesn't want a
		 * .TOC. so don't fail it just for that. */
		if (sym)
			sym->st_value = my_r2(sechdrs, me);
		me->arch.toc_fixed = true;
	}

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16 relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_LO:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_LO_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0) {
				pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC64_TOC16_HA:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF ||
			    sym->st_shndx == SHN_LIVEPATCH) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me,
						      strtab + sym->st_name);
				if (!value)
					return -ENOENT;
				if (!restore_r2(strtab + sym->st_name,
						(u32 *)location + 1, me))
					return -ENOEXEC;
			} else
				value += local_entry_offset(sym);

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				pr_err("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		case R_PPC64_REL32:
			/* 32 bits relative (used by relative exception tables) */
			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x80000000 > 0xffffffff) {
				pr_err("%s: REL32 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}
			*(u32 *)location = value;
			break;

		case R_PPC64_TOCSAVE:
			/*
			 * Marker reloc indicates we don't have to save r2.
			 * That would only save us one instruction, so ignore
			 * it.
			 */
			break;

		case R_PPC64_ENTRY:
			/*
			 * Optimize ELFv2 large code model entry point if
			 * the TOC is within 2GB range of current location.
			 */
			value = my_r2(sechdrs, me) - (unsigned long)location;
			if (value + 0x80008000 > 0xffffffff)
				break;
			/*
			 * Check for the large code model prolog sequence:
			 *	ld r2, ...(r12)
			 *	add r2, r2, r12
			 */
			if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
				break;
			if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
				break;
			/*
			 * If found, replace it with:
			 *	addis r2, r12, (.TOC.-func)@ha
			 *	addi  r2, r2,  (.TOC.-func)@l
			 */
			((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
			((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
			break;

		case R_PPC64_REL16_HA:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			value = ((value + 0x8000) >> 16);
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_REL16_LO:
			/* Subtract location pointer */
			value -= (unsigned long)location;
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		default:
			pr_err("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE
int module_trampoline_target(struct module *mod, unsigned long addr,
			     unsigned long *target)
{
	struct ppc64_stub_entry *stub;
	func_desc_t funcdata;
	u32 magic;

	if (!within_module_core(addr, mod)) {
		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	stub = (struct ppc64_stub_entry *)addr;

	if (copy_from_kernel_nofault(&magic, &stub->magic,
				     sizeof(magic))) {
		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (magic != STUB_MAGIC) {
		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
				     sizeof(funcdata))) {
		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
		return -EFAULT;
	}

	*target = stub_func_addr(funcdata);

	return 0;
}

int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	mod->arch.tramp = stub_for_addr(sechdrs,
					(unsigned long)ftrace_caller,
					mod,
					"ftrace_caller");
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	mod->arch.tramp_regs = stub_for_addr(sechdrs,
					     (unsigned long)ftrace_regs_caller,
					     mod,
					     "ftrace_regs_caller");
	if (!mod->arch.tramp_regs)
		return -ENOENT;
#endif

	if (!mod->arch.tramp)
		return -ENOENT;

	return 0;
}
#endif