1// SPDX-License-Identifier: GPL-2.0-or-later
2/* Kernel module help for PPC64.
3 Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.
4
5*/
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/module.h>
10#include <linux/elf.h>
11#include <linux/moduleloader.h>
12#include <linux/err.h>
13#include <linux/vmalloc.h>
14#include <linux/ftrace.h>
15#include <linux/bug.h>
16#include <linux/uaccess.h>
17#include <asm/module.h>
18#include <asm/firmware.h>
19#include <asm/code-patching.h>
20#include <linux/sort.h>
21#include <asm/setup.h>
22#include <asm/sections.h>
23#include <asm/inst.h>
24
25/* FIXME: We don't do .init separately. To do this, we'd need to have
26 a separate r2 value in the init and core section, and stub between
27 them, too.
28
29 Using a magic allocator which places modules within 32MB solves
30 this, and makes other things simpler. Anton?
31 --RR. */
32
33#ifdef PPC64_ELF_ABI_v2
34
35/* An address is simply the address of the function. */
36typedef unsigned long func_desc_t;
37
38static func_desc_t func_desc(unsigned long addr)
39{
40 return addr;
41}
42static unsigned long func_addr(unsigned long addr)
43{
44 return addr;
45}
46static unsigned long stub_func_addr(func_desc_t func)
47{
48 return func;
49}
50
51/* PowerPC64 specific values for the Elf64_Sym st_other field. */
52#define STO_PPC64_LOCAL_BIT 5
53#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT)
54#define PPC64_LOCAL_ENTRY_OFFSET(other) \
55 (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
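/*
 * A worked example (not from the ABI text, just the arithmetic above): an
 * st_other field encoding 3 in bits 5-7 decodes to (1 << 3) = 8 bytes, i.e.
 * the usual two-instruction (addis r2 / addi r2) ELFv2 global-entry prologue,
 * so the local entry point sits 8 bytes past the global one.
 */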
56
57static unsigned int local_entry_offset(const Elf64_Sym *sym)
58{
 59 /* sym->st_other indicates the offset to the local entry point
 60 * (otherwise the callee assumes r12 is the address of the start
 61 * of the function and tries to derive r2 from it). */
62 return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
63}
64#else
65
66/* An address is the address of the OPD entry, which contains the address of the function. */
67typedef struct ppc64_opd_entry func_desc_t;
68
69static func_desc_t func_desc(unsigned long addr)
70{
71 return *(struct ppc64_opd_entry *)addr;
72}
73static unsigned long func_addr(unsigned long addr)
74{
75 return func_desc(addr).funcaddr;
76}
77static unsigned long stub_func_addr(func_desc_t func)
78{
79 return func.funcaddr;
80}
81static unsigned int local_entry_offset(const Elf64_Sym *sym)
82{
83 return 0;
84}
85
86void *dereference_module_function_descriptor(struct module *mod, void *ptr)
87{
88 if (ptr < (void *)mod->arch.start_opd ||
89 ptr >= (void *)mod->arch.end_opd)
90 return ptr;
91
92 return dereference_function_descriptor(ptr);
93}
94#endif
95
96#define STUB_MAGIC 0x73747562 /* stub */
97
98/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
99 the kernel itself). But on PPC64, these need to be used for every
100 jump, actually, to reset r2 (TOC+0x8000). */
101struct ppc64_stub_entry
102{
103 /* 28 byte jump instruction sequence (7 instructions). We only
104 * need 6 instructions on ABIv2 but we always allocate 7 so
105 * so we don't have to modify the trampoline load instruction. */
106 u32 jump[7];
107 /* Used by ftrace to identify stubs */
108 u32 magic;
109 /* Data for the above code */
110 func_desc_t funcdata;
111};
112
113/*
 114 * PPC64 uses 24-bit jumps, but we need to jump into other modules or
 115 * the kernel, which may be further away. So we jump to a stub.
116 *
117 * For ELFv1 we need to use this to set up the new r2 value (aka TOC
118 * pointer). For ELFv2 it's the callee's responsibility to set up the
119 * new r2, but for both we need to save the old r2.
120 *
121 * We could simply patch the new r2 value and function pointer into
122 * the stub, but it's significantly shorter to put these values at the
123 * end of the stub code, and patch the stub address (32-bits relative
124 * to the TOC ptr, r2) into the stub.
125 */
126static u32 ppc64_stub_insns[] = {
127 PPC_RAW_ADDIS(_R11, _R2, 0),
128 PPC_RAW_ADDI(_R11, _R11, 0),
129 /* Save current r2 value in magic place on the stack. */
130 PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
131 PPC_RAW_LD(_R12, _R11, 32),
132#ifdef PPC64_ELF_ABI_v1
133 /* Set up new r2 from function descriptor */
134 PPC_RAW_LD(_R2, _R11, 40),
135#endif
136 PPC_RAW_MTCTR(_R12),
137 PPC_RAW_BCTR(),
138};
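/*
 * Roughly, once create_stub() below patches in the offsets, the sequence
 * above behaves like this (illustrative sketch, offsets per the struct
 * layout above):
 *
 *	addis	r11,r2,reladdr@ha	; r11 = address of this stub entry
 *	addi	r11,r11,reladdr@l
 *	std	r2,R2_STACK_OFFSET(r1)	; save the caller's TOC pointer
 *	ld	r12,32(r11)		; funcdata (target address / OPD funcaddr)
 *	ld	r2,40(r11)		; ELFv1 only: target's TOC from the OPD
 *	mtctr	r12
 *	bctr
 */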
139
140/* Count how many different 24-bit relocations there are (different symbol,
141 different addend). */
142static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
143{
144 unsigned int i, r_info, r_addend, _count_relocs;
145
146 /* FIXME: Only count external ones --RR */
147 _count_relocs = 0;
148 r_info = 0;
149 r_addend = 0;
150 for (i = 0; i < num; i++)
151 /* Only count 24-bit relocs, others don't need stubs */
152 if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
153 (r_info != ELF64_R_SYM(rela[i].r_info) ||
154 r_addend != rela[i].r_addend)) {
155 _count_relocs++;
156 r_info = ELF64_R_SYM(rela[i].r_info);
157 r_addend = rela[i].r_addend;
158 }
159
160 return _count_relocs;
161}
162
163static int relacmp(const void *_x, const void *_y)
164{
165 const Elf64_Rela *x, *y;
166
167 y = (Elf64_Rela *)_x;
168 x = (Elf64_Rela *)_y;
169
170 /* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
171 * make the comparison cheaper/faster. It won't affect the sorting or
172 * the counting algorithms' performance
173 */
174 if (x->r_info < y->r_info)
175 return -1;
176 else if (x->r_info > y->r_info)
177 return 1;
178 else if (x->r_addend < y->r_addend)
179 return -1;
180 else if (x->r_addend > y->r_addend)
181 return 1;
182 else
183 return 0;
184}
185
186/* Get size of potential trampolines required. */
187static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
188 const Elf64_Shdr *sechdrs)
189{
190 /* One extra reloc so it's always 0-funcaddr terminated */
191 unsigned long relocs = 1;
192 unsigned i;
193
194 /* Every relocated section... */
195 for (i = 1; i < hdr->e_shnum; i++) {
196 if (sechdrs[i].sh_type == SHT_RELA) {
197 pr_debug("Found relocations in section %u\n", i);
198 pr_debug("Ptr: %p. Number: %Lu\n",
199 (void *)sechdrs[i].sh_addr,
200 sechdrs[i].sh_size / sizeof(Elf64_Rela));
201
202 /* Sort the relocation information based on a symbol and
203 * addend key. This is a stable O(n*log n) complexity
 204 * algorithm but it will reduce the complexity of
205 * count_relocs() to linear complexity O(n)
206 */
207 sort((void *)sechdrs[i].sh_addr,
208 sechdrs[i].sh_size / sizeof(Elf64_Rela),
209 sizeof(Elf64_Rela), relacmp, NULL);
210
211 relocs += count_relocs((void *)sechdrs[i].sh_addr,
212 sechdrs[i].sh_size
213 / sizeof(Elf64_Rela));
214 }
215 }
216
217#ifdef CONFIG_DYNAMIC_FTRACE
218 /* make the trampoline to the ftrace_caller */
219 relocs++;
220#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
221 /* an additional one for ftrace_regs_caller */
222 relocs++;
223#endif
224#endif
225
226 pr_debug("Looks like a total of %lu stubs, max\n", relocs);
227 return relocs * sizeof(struct ppc64_stub_entry);
228}
229
230/* Still needed for ELFv2, for .TOC. */
231static void dedotify_versions(struct modversion_info *vers,
232 unsigned long size)
233{
234 struct modversion_info *end;
235
236 for (end = (void *)vers + size; vers < end; vers++)
237 if (vers->name[0] == '.') {
238 memmove(vers->name, vers->name+1, strlen(vers->name));
239 }
240}
241
242/*
 243 * Undefined symbols which refer to .funcname are hacked to refer to funcname.
 244 * Make .TOC. seem to be defined (its value is set later).
245 */
246static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
247{
248 unsigned int i;
249
250 for (i = 1; i < numsyms; i++) {
251 if (syms[i].st_shndx == SHN_UNDEF) {
252 char *name = strtab + syms[i].st_name;
253 if (name[0] == '.') {
254 if (strcmp(name+1, "TOC.") == 0)
255 syms[i].st_shndx = SHN_ABS;
256 syms[i].st_name++;
257 }
258 }
259 }
260}
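/*
 * E.g. (hypothetical symbol): an undefined ".printk" ends up as "printk"
 * simply by bumping st_name past the dot; ".TOC." additionally gets SHN_ABS
 * so it looks defined until apply_relocate_add() assigns its value.
 */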
261
262static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
263 const char *strtab,
264 unsigned int symindex)
265{
266 unsigned int i, numsyms;
267 Elf64_Sym *syms;
268
269 syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
270 numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
271
272 for (i = 1; i < numsyms; i++) {
273 if (syms[i].st_shndx == SHN_ABS
274 && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
275 return &syms[i];
276 }
277 return NULL;
278}
279
280int module_frob_arch_sections(Elf64_Ehdr *hdr,
281 Elf64_Shdr *sechdrs,
282 char *secstrings,
283 struct module *me)
284{
285 unsigned int i;
286
287 /* Find .toc and .stubs sections, symtab and strtab */
288 for (i = 1; i < hdr->e_shnum; i++) {
289 char *p;
290 if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
291 me->arch.stubs_section = i;
292 else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
293 me->arch.toc_section = i;
294 if (sechdrs[i].sh_addralign < 8)
295 sechdrs[i].sh_addralign = 8;
296 }
297 else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
298 dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
299 sechdrs[i].sh_size);
300
301 /* We don't handle .init for the moment: rename to _init */
302 while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
303 p[0] = '_';
304
305 if (sechdrs[i].sh_type == SHT_SYMTAB)
306 dedotify((void *)hdr + sechdrs[i].sh_offset,
307 sechdrs[i].sh_size / sizeof(Elf64_Sym),
308 (void *)hdr
309 + sechdrs[sechdrs[i].sh_link].sh_offset);
310 }
311
312 if (!me->arch.stubs_section) {
313 pr_err("%s: doesn't contain .stubs.\n", me->name);
314 return -ENOEXEC;
315 }
316
317 /* If we don't have a .toc, just use .stubs. We need to set r2
318 to some reasonable value in case the module calls out to
319 other functions via a stub, or if a function pointer escapes
320 the module by some means. */
321 if (!me->arch.toc_section)
322 me->arch.toc_section = me->arch.stubs_section;
323
324 /* Override the stubs size */
325 sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
326 return 0;
327}
328
329#ifdef CONFIG_MPROFILE_KERNEL
330
331static u32 stub_insns[] = {
332 PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
333 PPC_RAW_ADDIS(_R12, _R12, 0),
334 PPC_RAW_ADDI(_R12, _R12, 0),
335 PPC_RAW_MTCTR(_R12),
336 PPC_RAW_BCTR(),
337};
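/*
 * Illustrative rendering of the sequence above once create_ftrace_stub()
 * below fills in the offsets (the addis/addi pair is patched with
 * reladdr@ha / reladdr@l):
 *
 *	ld	r12,<offsetof(struct paca_struct, kernel_toc)>(r13)
 *	addis	r12,r12,reladdr@ha
 *	addi	r12,r12,reladdr@l	; r12 = target, e.g. ftrace_caller
 *	mtctr	r12
 *	bctr
 */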
338
339/*
340 * For mprofile-kernel we use a special stub for ftrace_caller() because we
341 * can't rely on r2 containing this module's TOC when we enter the stub.
342 *
 343 * That can happen if the function calling us didn't need to use the TOC. In
 344 * that case it won't have set up r2, and the r2 value will be either the
 345 * kernel's TOC, or possibly another module's TOC.
346 *
 347 * To deal with that, this stub uses the kernel TOC, which is always accessible
348 * via the paca (in r13). The target (ftrace_caller()) is responsible for
349 * saving and restoring the toc before returning.
350 */
351static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
352 unsigned long addr,
353 struct module *me)
354{
355 long reladdr;
356
357 memcpy(entry->jump, stub_insns, sizeof(stub_insns));
358
359 /* Stub uses address relative to kernel toc (from the paca) */
360 reladdr = addr - kernel_toc_addr();
361 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
362 pr_err("%s: Address of %ps out of range of kernel_toc.\n",
363 me->name, (void *)addr);
364 return 0;
365 }
366
367 entry->jump[1] |= PPC_HA(reladdr);
368 entry->jump[2] |= PPC_LO(reladdr);
369
 370 /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
371 entry->funcdata = func_desc(addr);
372 entry->magic = STUB_MAGIC;
373
374 return 1;
375}
376
377static bool is_mprofile_ftrace_call(const char *name)
378{
379 if (!strcmp("_mcount", name))
380 return true;
381#ifdef CONFIG_DYNAMIC_FTRACE
382 if (!strcmp("ftrace_caller", name))
383 return true;
384#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
385 if (!strcmp("ftrace_regs_caller", name))
386 return true;
387#endif
388#endif
389
390 return false;
391}
392#else
393static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
394 unsigned long addr,
395 struct module *me)
396{
397 return 0;
398}
399
400static bool is_mprofile_ftrace_call(const char *name)
401{
402 return false;
403}
404#endif
405
406/*
 407 * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives
 408 * the value the maximum span reachable with a signed 16-bit offset). Round down
409 * to a 256 byte boundary for the odd case where we are setting up r2 without a
410 * .toc section.
411 */
412static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
413{
414 return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000;
415}
416
417/* Patch stub to reference function and correct r2 value. */
418static inline int create_stub(const Elf64_Shdr *sechdrs,
419 struct ppc64_stub_entry *entry,
420 unsigned long addr,
421 struct module *me,
422 const char *name)
423{
424 long reladdr;
425
426 if (is_mprofile_ftrace_call(name))
427 return create_ftrace_stub(entry, addr, me);
428
429 memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
430
431 /* Stub uses address relative to r2. */
432 reladdr = (unsigned long)entry - my_r2(sechdrs, me);
433 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
434 pr_err("%s: Address %p of stub out of range of %p.\n",
 435 me->name, (void *)reladdr, (void *)my_r2(sechdrs, me));
436 return 0;
437 }
438 pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
439
440 entry->jump[0] |= PPC_HA(reladdr);
441 entry->jump[1] |= PPC_LO(reladdr);
442 entry->funcdata = func_desc(addr);
443 entry->magic = STUB_MAGIC;
444
445 return 1;
446}
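/*
 * Sketch of the @ha/@l split with made-up numbers: if reladdr were
 * 0x12348000, PPC_HA(reladdr) = 0x1235 and PPC_LO(reladdr) = 0x8000.
 * The stub then computes r11 = r2 + (0x1235 << 16) + (s16)0x8000
 * = r2 + 0x12350000 - 0x8000 = r2 + 0x12348000, i.e. the high half is
 * pre-biased so the sign-extended low half lands back on the right address.
 */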
447
448/* Create stub to jump to function described in this OPD/ptr: we need the
449 stub to set up the TOC ptr (r2) for the function. */
450static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
451 unsigned long addr,
452 struct module *me,
453 const char *name)
454{
455 struct ppc64_stub_entry *stubs;
456 unsigned int i, num_stubs;
457
458 num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
459
460 /* Find this stub, or if that fails, the next avail. entry */
461 stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
462 for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
463 if (WARN_ON(i >= num_stubs))
464 return 0;
465
466 if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
467 return (unsigned long)&stubs[i];
468 }
469
470 if (!create_stub(sechdrs, &stubs[i], addr, me, name))
471 return 0;
472
473 return (unsigned long)&stubs[i];
474}
475
476/* We expect a nop next: if it is, replace it with an instruction to
477 restore r2. */
478static int restore_r2(const char *name, u32 *instruction, struct module *me)
479{
480 u32 *prev_insn = instruction - 1;
481
482 if (is_mprofile_ftrace_call(name))
483 return 1;
484
485 /*
486 * Make sure the branch isn't a sibling call. Sibling calls aren't
487 * "link" branches and they don't return, so they don't need the r2
488 * restore afterwards.
489 */
490 if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
491 return 1;
492
493 if (*instruction != PPC_RAW_NOP()) {
494 pr_err("%s: Expected nop after call, got %08x at %pS\n",
495 me->name, *instruction, instruction);
496 return 0;
497 }
498 /* ld r2,R2_STACK_OFFSET(r1) */
499 *instruction = PPC_INST_LD_TOC;
500 return 1;
501}
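/*
 * Net effect at a call site that goes via a stub (hypothetical disassembly):
 *
 *	before:	bl	target		after:	bl	stub_for_target
 *		nop				ld	r2,R2_STACK_OFFSET(r1)
 *
 * The stub saved the caller's r2 before branching, and the patched nop
 * restores it on return.
 */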
502
503int apply_relocate_add(Elf64_Shdr *sechdrs,
504 const char *strtab,
505 unsigned int symindex,
506 unsigned int relsec,
507 struct module *me)
508{
509 unsigned int i;
510 Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
511 Elf64_Sym *sym;
512 unsigned long *location;
513 unsigned long value;
514
515 pr_debug("Applying ADD relocate section %u to %u\n", relsec,
516 sechdrs[relsec].sh_info);
517
518 /* First time we're called, we can fix up .TOC. */
519 if (!me->arch.toc_fixed) {
520 sym = find_dot_toc(sechdrs, strtab, symindex);
521 /* It's theoretically possible that a module doesn't want a
522 * .TOC. so don't fail it just for that. */
523 if (sym)
524 sym->st_value = my_r2(sechdrs, me);
525 me->arch.toc_fixed = true;
526 }
527
528 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
529 /* This is where to make the change */
530 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
531 + rela[i].r_offset;
532 /* This is the symbol it is referring to */
533 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
534 + ELF64_R_SYM(rela[i].r_info);
535
536 pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
537 location, (long)ELF64_R_TYPE(rela[i].r_info),
538 strtab + sym->st_name, (unsigned long)sym->st_value,
539 (long)rela[i].r_addend);
540
541 /* `Everything is relative'. */
542 value = sym->st_value + rela[i].r_addend;
543
544 switch (ELF64_R_TYPE(rela[i].r_info)) {
545 case R_PPC64_ADDR32:
546 /* Simply set it */
547 *(u32 *)location = value;
548 break;
549
550 case R_PPC64_ADDR64:
551 /* Simply set it */
552 *(unsigned long *)location = value;
553 break;
554
555 case R_PPC64_TOC:
556 *(unsigned long *)location = my_r2(sechdrs, me);
557 break;
558
559 case R_PPC64_TOC16:
560 /* Subtract TOC pointer */
561 value -= my_r2(sechdrs, me);
562 if (value + 0x8000 > 0xffff) {
563 pr_err("%s: bad TOC16 relocation (0x%lx)\n",
564 me->name, value);
565 return -ENOEXEC;
566 }
567 *((uint16_t *) location)
568 = (*((uint16_t *) location) & ~0xffff)
569 | (value & 0xffff);
570 break;
571
572 case R_PPC64_TOC16_LO:
573 /* Subtract TOC pointer */
574 value -= my_r2(sechdrs, me);
575 *((uint16_t *) location)
576 = (*((uint16_t *) location) & ~0xffff)
577 | (value & 0xffff);
578 break;
579
580 case R_PPC64_TOC16_DS:
581 /* Subtract TOC pointer */
582 value -= my_r2(sechdrs, me);
583 if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
584 pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
585 me->name, value);
586 return -ENOEXEC;
587 }
588 *((uint16_t *) location)
589 = (*((uint16_t *) location) & ~0xfffc)
590 | (value & 0xfffc);
591 break;
592
593 case R_PPC64_TOC16_LO_DS:
594 /* Subtract TOC pointer */
595 value -= my_r2(sechdrs, me);
596 if ((value & 3) != 0) {
597 pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
598 me->name, value);
599 return -ENOEXEC;
600 }
601 *((uint16_t *) location)
602 = (*((uint16_t *) location) & ~0xfffc)
603 | (value & 0xfffc);
604 break;
605
606 case R_PPC64_TOC16_HA:
607 /* Subtract TOC pointer */
608 value -= my_r2(sechdrs, me);
609 value = ((value + 0x8000) >> 16);
610 *((uint16_t *) location)
611 = (*((uint16_t *) location) & ~0xffff)
612 | (value & 0xffff);
613 break;
614
615 case R_PPC_REL24:
616 /* FIXME: Handle weak symbols here --RR */
617 if (sym->st_shndx == SHN_UNDEF ||
618 sym->st_shndx == SHN_LIVEPATCH) {
619 /* External: go via stub */
620 value = stub_for_addr(sechdrs, value, me,
621 strtab + sym->st_name);
622 if (!value)
623 return -ENOENT;
624 if (!restore_r2(strtab + sym->st_name,
625 (u32 *)location + 1, me))
626 return -ENOEXEC;
627 } else
628 value += local_entry_offset(sym);
629
630 /* Convert value to relative */
631 value -= (unsigned long)location;
632 if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
633 pr_err("%s: REL24 %li out of range!\n",
634 me->name, (long int)value);
635 return -ENOEXEC;
636 }
637
638 /* Only replace bits 2 through 26 */
639 *(uint32_t *)location
640 = (*(uint32_t *)location & ~0x03fffffc)
641 | (value & 0x03fffffc);
642 break;
643
644 case R_PPC64_REL64:
645 /* 64 bits relative (used by features fixups) */
646 *location = value - (unsigned long)location;
647 break;
648
649 case R_PPC64_REL32:
650 /* 32 bits relative (used by relative exception tables) */
651 /* Convert value to relative */
652 value -= (unsigned long)location;
653 if (value + 0x80000000 > 0xffffffff) {
654 pr_err("%s: REL32 %li out of range!\n",
655 me->name, (long int)value);
656 return -ENOEXEC;
657 }
658 *(u32 *)location = value;
659 break;
660
661 case R_PPC64_TOCSAVE:
662 /*
663 * Marker reloc indicates we don't have to save r2.
664 * That would only save us one instruction, so ignore
665 * it.
666 */
667 break;
668
669 case R_PPC64_ENTRY:
670 /*
671 * Optimize ELFv2 large code model entry point if
672 * the TOC is within 2GB range of current location.
673 */
674 value = my_r2(sechdrs, me) - (unsigned long)location;
675 if (value + 0x80008000 > 0xffffffff)
676 break;
677 /*
678 * Check for the large code model prolog sequence:
679 * ld r2, ...(r12)
680 * add r2, r2, r12
681 */
682 if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
683 break;
684 if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
685 break;
686 /*
687 * If found, replace it with:
688 * addis r2, r12, (.TOC.-func)@ha
689 * addi r2, r2, (.TOC.-func)@l
690 */
691 ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
692 ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
693 break;
694
695 case R_PPC64_REL16_HA:
696 /* Subtract location pointer */
697 value -= (unsigned long)location;
698 value = ((value + 0x8000) >> 16);
699 *((uint16_t *) location)
700 = (*((uint16_t *) location) & ~0xffff)
701 | (value & 0xffff);
702 break;
703
704 case R_PPC64_REL16_LO:
705 /* Subtract location pointer */
706 value -= (unsigned long)location;
707 *((uint16_t *) location)
708 = (*((uint16_t *) location) & ~0xffff)
709 | (value & 0xffff);
710 break;
711
712 default:
713 pr_err("%s: Unknown ADD relocation: %lu\n",
714 me->name,
715 (unsigned long)ELF64_R_TYPE(rela[i].r_info));
716 return -ENOEXEC;
717 }
718 }
719
720 return 0;
721}
722
723#ifdef CONFIG_DYNAMIC_FTRACE
724int module_trampoline_target(struct module *mod, unsigned long addr,
725 unsigned long *target)
726{
727 struct ppc64_stub_entry *stub;
728 func_desc_t funcdata;
729 u32 magic;
730
731 if (!within_module_core(addr, mod)) {
732 pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
733 return -EFAULT;
734 }
735
736 stub = (struct ppc64_stub_entry *)addr;
737
738 if (copy_from_kernel_nofault(&magic, &stub->magic,
739 sizeof(magic))) {
740 pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
741 return -EFAULT;
742 }
743
744 if (magic != STUB_MAGIC) {
745 pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
746 return -EFAULT;
747 }
748
749 if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
750 sizeof(funcdata))) {
751 pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
752 return -EFAULT;
753 }
754
755 *target = stub_func_addr(funcdata);
756
757 return 0;
758}
759
760int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
761{
762 mod->arch.tramp = stub_for_addr(sechdrs,
763 (unsigned long)ftrace_caller,
764 mod,
765 "ftrace_caller");
766#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
767 mod->arch.tramp_regs = stub_for_addr(sechdrs,
768 (unsigned long)ftrace_regs_caller,
769 mod,
770 "ftrace_regs_caller");
771 if (!mod->arch.tramp_regs)
772 return -ENOENT;
773#endif
774
775 if (!mod->arch.tramp)
776 return -ENOENT;
777
778 return 0;
779}
780#endif