// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>
#include <asm/sections.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL 0
#else
# define USE_BRL 1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
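
/*
 * Worked example of this encoding: R_IA64_GPREL22 is 0x2a, so
 * format = 0x2a & FORMAT_MASK = 2 (RF_INSN22) and
 * formula = (0x2a >> VALUE_SHIFT) & VALUE_MASK = 5 (RV_GPREL).
 */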

enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes. */
};

#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N
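
/*
 * Relocation types without an entry above are left NULL by the designated
 * initializers; the printk paths below fall back to "?" for them.
 */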

/* Opaque struct for insns, to protect against derefs. */
struct insn;

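/*
 * A "struct insn *" is not dereferenced here: its upper bits hold the
 * 16-byte-aligned bundle address and its low bits hold the slot number,
 * as decoded by bundle() and slot() below.
 */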
static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
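	/*
	 * Unsigned-compare trick for a signed range check: val + 2^59 is
	 * below 2^60 exactly when -2^59 <= val < 2^59.  The same idiom is
	 * used for the 22-bit and 21-bit immediates below.
	 */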
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
		       mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /*  [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	       movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /*  [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	       brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

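/*
 * Note the "+ 2" when patching below: PLT bundles are 16-byte aligned
 * (sh_addralign is set to 16 in module_frob_arch_sections()), so
 * bundle + 2 yields an insn pointer whose slot() is 2, selecting the
 * X-slot immediate of the movl/brl in the MLX bundle.
 */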
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /*  [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	       movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /*  [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	       movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /*  [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	       mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	       br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */

void
module_arch_freeing_init (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_LTOFF22:
		case R_IA64_LTOFF22X:
		case R_IA64_LTOFF64I:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_PCREL21B:
		case R_IA64_PLTOFF22:
		case R_IA64_PLTOFF64I:
		case R_IA64_PLTOFF64MSB:
		case R_IA64_PLTOFF64LSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
383
384/* We need to create an function-descriptors for any internal function
385 which is referenced. */
386static unsigned int
387count_fdescs (const Elf64_Rela *rela, unsigned int num)
388{
389 unsigned int i, ret = 0;
390
391 /* Sure, this is order(n^2), but it's usually short, and not time critical. */
392 for (i = 0; i < num; i++) {
393 switch (ELF64_R_TYPE(rela[i].r_info)) {
394 case R_IA64_FPTR64I:
395 case R_IA64_FPTR32LSB:
396 case R_IA64_FPTR32MSB:
397 case R_IA64_FPTR64LSB:
398 case R_IA64_FPTR64MSB:
399 case R_IA64_LTOFF_FPTR22:
400 case R_IA64_LTOFF_FPTR32LSB:
401 case R_IA64_LTOFF_FPTR32MSB:
402 case R_IA64_LTOFF_FPTR64I:
403 case R_IA64_LTOFF_FPTR64LSB:
404 case R_IA64_LTOFF_FPTR64MSB:
405 case R_IA64_IPLTMSB:
406 case R_IA64_IPLTLSB:
407 /*
408 * Jumps to static functions sometimes go straight to their
409 * offset. Of course, that may not be possible if the jump is
410 * from init -> core or vice. versa, so we need to generate an
411 * FDESC (and PLT etc) for that.
412 */
413 case R_IA64_PCREL21B:
414 if (!duplicate_reloc(rela, i))
415 ret++;
416 break;
417 }
418 }
419 return ret;
420}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}

static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

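/*
 * True when VALUE lies in [gp - MAX_LTOFF/2, gp + MAX_LTOFF/2), i.e., in
 * the 4MB window reachable from gp with a signed 22-bit displacement.
 */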
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->addr) {
		if (fdesc->addr == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->addr = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

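	/* "val" is the "S + A" of the formula comments above; "P" is the place being relocated. */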
	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core;
				 * if the branch won't reach, allocate a PLT for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
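				/* delta is in 16-byte bundles; this mirrors apply_imm21b()'s signed 21-bit range check */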
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			fallthrough;
		      default:
			val -= bundle(location);
			break;

		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;

		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;

	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
				       "non-local symbol (%lx)\n", __func__,
				       reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}

int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_layout.size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections
			 * (such as .got) get allocated at the end of the module; placing
			 * gp MAX_LTOFF/2 before the end keeps them gp-addressable.
			 */
			gp = mod->core_layout.size - MAX_LTOFF / 2;
		else
			gp = mod->core_layout.size / 2;
		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections, but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				swap(*e1, *e2);
			}
		}
	}
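	/* The init and core address ranges are disjoint, so after sorting each forms one contiguous run. */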
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	struct mod_arch_specific *mas = &mod->arch;

	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mas->unwind)
		register_unwind_table(mod);

	/*
	 * ".opd" was already relocated to the final destination.  Store
	 * its address for use in the symbolizer.
	 */
	mas->opd_addr = (void *)mas->opd->sh_addr;
	mas->opd_size = mas->opd->sh_size;

	/*
	 * Module relocation was already done at this point.  Section
	 * headers are about to be deleted.  Wipe out load-time context.
	 */
	mas->core_plt = NULL;
	mas->init_plt = NULL;
	mas->got = NULL;
	mas->opd = NULL;
	mas->unwind = NULL;
	mas->gp = 0;
	mas->next_got_entry = 0;

	return 0;
}

void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	if (mod->arch.core_unw_table) {
		unw_remove_unwind_table(mod->arch.core_unw_table);
		mod->arch.core_unw_table = NULL;
	}
}

void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
	struct mod_arch_specific *mas = &mod->arch;

	if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
		return ptr;

	return dereference_function_descriptor(ptr);
}