1#include <fcntl.h>
2#include <stdio.h>
3#include <errno.h>
4#include <string.h>
5#include <unistd.h>
6#include <inttypes.h>
7
8#include "symbol.h"
9#include "demangle-java.h"
10#include "machine.h"
11#include "vdso.h"
12#include <symbol/kallsyms.h>
13#include "debug.h"
14
15#ifndef EM_AARCH64
16#define EM_AARCH64 183 /* ARM 64 bit */
17#endif
18
19
20#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
21extern char *cplus_demangle(const char *, int);
22
23static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
24{
25 return cplus_demangle(c, i);
26}
27#else
28#ifdef NO_DEMANGLE
29static inline char *bfd_demangle(void __maybe_unused *v,
30 const char __maybe_unused *c,
31 int __maybe_unused i)
32{
33 return NULL;
34}
35#else
36#define PACKAGE 'perf'
37#include <bfd.h>
38#endif
39#endif
40
41#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
42static int elf_getphdrnum(Elf *elf, size_t *dst)
43{
44 GElf_Ehdr gehdr;
45 GElf_Ehdr *ehdr;
46
47 ehdr = gelf_getehdr(elf, &gehdr);
48 if (!ehdr)
49 return -1;
50
51 *dst = ehdr->e_phnum;
52
53 return 0;
54}
55#endif
56
57#ifndef NT_GNU_BUILD_ID
58#define NT_GNU_BUILD_ID 3
59#endif
60
61/**
62 * elf_symtab__for_each_symbol - iterate through all the symbols
63 * @syms: Elf_Data with the symbol table to iterate over
64 * @nr_syms: number of symbols in @syms
65 * @idx: uint32_t index iterator
66 * @sym: GElf_Sym iterator
67 */
68#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
69 for (idx = 0, gelf_getsym(syms, idx, &sym);\
70 idx < nr_syms; \
71 idx++, gelf_getsym(syms, idx, &sym))
72
73static inline uint8_t elf_sym__type(const GElf_Sym *sym)
74{
75 return GELF_ST_TYPE(sym->st_info);
76}
77
78#ifndef STT_GNU_IFUNC
79#define STT_GNU_IFUNC 10
80#endif
81
82static inline int elf_sym__is_function(const GElf_Sym *sym)
83{
84 return (elf_sym__type(sym) == STT_FUNC ||
85 elf_sym__type(sym) == STT_GNU_IFUNC) &&
86 sym->st_name != 0 &&
87 sym->st_shndx != SHN_UNDEF;
88}
89
90static inline bool elf_sym__is_object(const GElf_Sym *sym)
91{
92 return elf_sym__type(sym) == STT_OBJECT &&
93 sym->st_name != 0 &&
94 sym->st_shndx != SHN_UNDEF;
95}
96
97static inline int elf_sym__is_label(const GElf_Sym *sym)
98{
99 return elf_sym__type(sym) == STT_NOTYPE &&
100 sym->st_name != 0 &&
101 sym->st_shndx != SHN_UNDEF &&
102 sym->st_shndx != SHN_ABS;
103}
104
105static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
106{
107 switch (type) {
108 case MAP__FUNCTION:
109 return elf_sym__is_function(sym);
110 case MAP__VARIABLE:
111 return elf_sym__is_object(sym);
112 default:
113 return false;
114 }
115}
116
117static inline const char *elf_sym__name(const GElf_Sym *sym,
118 const Elf_Data *symstrs)
119{
120 return symstrs->d_buf + sym->st_name;
121}
122
123static inline const char *elf_sec__name(const GElf_Shdr *shdr,
124 const Elf_Data *secstrs)
125{
126 return secstrs->d_buf + shdr->sh_name;
127}
128
129static inline int elf_sec__is_text(const GElf_Shdr *shdr,
130 const Elf_Data *secstrs)
131{
132 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
133}
134
135static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
136 const Elf_Data *secstrs)
137{
138 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
139}
140
141static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
142 enum map_type type)
143{
144 switch (type) {
145 case MAP__FUNCTION:
146 return elf_sec__is_text(shdr, secstrs);
147 case MAP__VARIABLE:
148 return elf_sec__is_data(shdr, secstrs);
149 default:
150 return false;
151 }
152}
153
154static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
155{
156 Elf_Scn *sec = NULL;
157 GElf_Shdr shdr;
158 size_t cnt = 1;
159
160 while ((sec = elf_nextscn(elf, sec)) != NULL) {
161 gelf_getshdr(sec, &shdr);
162
163 if ((addr >= shdr.sh_addr) &&
164 (addr < (shdr.sh_addr + shdr.sh_size)))
165 return cnt;
166
167 ++cnt;
168 }
169
170 return -1;
171}
172
173Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
174 GElf_Shdr *shp, const char *name, size_t *idx)
175{
176 Elf_Scn *sec = NULL;
177 size_t cnt = 1;
178
179 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
180 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
181 return NULL;
182
183 while ((sec = elf_nextscn(elf, sec)) != NULL) {
184 char *str;
185
186 gelf_getshdr(sec, shp);
187 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
188 if (str && !strcmp(name, str)) {
189 if (idx)
190 *idx = cnt;
191 return sec;
192 }
193 ++cnt;
194 }
195
196 return NULL;
197}
198
199#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
200 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
201 idx < nr_entries; \
202 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
203
204#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
205 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
206 idx < nr_entries; \
207 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
208
209/*
210 * We need to check if we have a .dynsym, so that we can handle the
211 * .plt, synthesizing its symbols, which aren't in either symbol table
212 * (.dynsym or .symtab).
213 * Always look at the original dso, not at debuginfo packages, which
214 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
215 */
216int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
217 symbol_filter_t filter)
218{
219 uint32_t nr_rel_entries, idx;
220 GElf_Sym sym;
221 u64 plt_offset;
222 GElf_Shdr shdr_plt;
223 struct symbol *f;
224 GElf_Shdr shdr_rel_plt, shdr_dynsym;
225 Elf_Data *reldata, *syms, *symstrs;
226 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
227 size_t dynsym_idx;
228 GElf_Ehdr ehdr;
229 char sympltname[1024];
230 Elf *elf;
231 int nr = 0, symidx, err = 0;
232
233 if (!ss->dynsym)
234 return 0;
235
236 elf = ss->elf;
237 ehdr = ss->ehdr;
238
239 scn_dynsym = ss->dynsym;
240 shdr_dynsym = ss->dynshdr;
241 dynsym_idx = ss->dynsym_idx;
242
243 if (scn_dynsym == NULL)
244 goto out_elf_end;
245
246 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
247 ".rela.plt", NULL);
248 if (scn_plt_rel == NULL) {
249 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
250 ".rel.plt", NULL);
251 if (scn_plt_rel == NULL)
252 goto out_elf_end;
253 }
254
255 err = -1;
256
257 if (shdr_rel_plt.sh_link != dynsym_idx)
258 goto out_elf_end;
259
260 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
261 goto out_elf_end;
262
263 /*
264 * Fetch the relocation section to find the idxes to the GOT
265 * and the symbols in the .dynsym they refer to.
266 */
267 reldata = elf_getdata(scn_plt_rel, NULL);
268 if (reldata == NULL)
269 goto out_elf_end;
270
271 syms = elf_getdata(scn_dynsym, NULL);
272 if (syms == NULL)
273 goto out_elf_end;
274
275 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
276 if (scn_symstrs == NULL)
277 goto out_elf_end;
278
279 symstrs = elf_getdata(scn_symstrs, NULL);
280 if (symstrs == NULL)
281 goto out_elf_end;
282
283 if (symstrs->d_size == 0)
284 goto out_elf_end;
285
286 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
287 plt_offset = shdr_plt.sh_offset;
288
289 if (shdr_rel_plt.sh_type == SHT_RELA) {
290 GElf_Rela pos_mem, *pos;
291
292 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
293 nr_rel_entries) {
294 symidx = GELF_R_SYM(pos->r_info);
295 plt_offset += shdr_plt.sh_entsize;
296 gelf_getsym(syms, symidx, &sym);
297 snprintf(sympltname, sizeof(sympltname),
298 "%s@plt", elf_sym__name(&sym, symstrs));
299
300 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
301 STB_GLOBAL, sympltname);
302 if (!f)
303 goto out_elf_end;
304
305 if (filter && filter(map, f))
306 symbol__delete(f);
307 else {
308 symbols__insert(&dso->symbols[map->type], f);
309 ++nr;
310 }
311 }
312 } else if (shdr_rel_plt.sh_type == SHT_REL) {
313 GElf_Rel pos_mem, *pos;
314 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
315 nr_rel_entries) {
316 symidx = GELF_R_SYM(pos->r_info);
317 plt_offset += shdr_plt.sh_entsize;
318 gelf_getsym(syms, symidx, &sym);
319 snprintf(sympltname, sizeof(sympltname),
320 "%s@plt", elf_sym__name(&sym, symstrs));
321
322 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
323 STB_GLOBAL, sympltname);
324 if (!f)
325 goto out_elf_end;
326
327 if (filter && filter(map, f))
328 symbol__delete(f);
329 else {
330 symbols__insert(&dso->symbols[map->type], f);
331 ++nr;
332 }
333 }
334 }
335
336 err = 0;
337out_elf_end:
338 if (err == 0)
339 return nr;
340 pr_debug("%s: problems reading %s PLT info.\n",
341 __func__, dso->long_name);
342 return 0;
343}
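
/*
 * Worked example of the synthesis above (numbers are illustrative):
 * with .plt at sh_offset 0x4020 and sh_entsize 0x10, the first
 * .rel(a).plt entry whose .dynsym name is "printf" yields a symbol
 * called "printf@plt" at 0x4030, sized 0x10, inserted into
 * dso->symbols[map->type]; the header slot at 0x4020 is skipped
 * because plt_offset is advanced before symbol__new() runs.
 */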
344
345/*
346 * Align offset to 4 bytes as needed for note name and descriptor data.
347 */
348#define NOTE_ALIGN(n) (((n) + 3) & -4U)
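
/*
 * Quick worked example of the rounding above: NOTE_ALIGN(5) == (5 + 3) & -4U == 8
 * and NOTE_ALIGN(8) == 11 & -4U == 8, i.e. note name and descriptor sizes are
 * padded up to the next 4-byte boundary, which is how the parsers below step
 * through .note.* section data.
 */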
349
350static int elf_read_build_id(Elf *elf, void *bf, size_t size)
351{
352 int err = -1;
353 GElf_Ehdr ehdr;
354 GElf_Shdr shdr;
355 Elf_Data *data;
356 Elf_Scn *sec;
357 Elf_Kind ek;
358 void *ptr;
359
360 if (size < BUILD_ID_SIZE)
361 goto out;
362
363 ek = elf_kind(elf);
364 if (ek != ELF_K_ELF)
365 goto out;
366
367 if (gelf_getehdr(elf, &ehdr) == NULL) {
368 pr_err("%s: cannot get elf header.\n", __func__);
369 goto out;
370 }
371
372 /*
373 * Check following sections for notes:
374 * '.note.gnu.build-id'
375 * '.notes'
376 * '.note' (VDSO specific)
377 */
378 do {
379 sec = elf_section_by_name(elf, &ehdr, &shdr,
380 ".note.gnu.build-id", NULL);
381 if (sec)
382 break;
383
384 sec = elf_section_by_name(elf, &ehdr, &shdr,
385 ".notes", NULL);
386 if (sec)
387 break;
388
389 sec = elf_section_by_name(elf, &ehdr, &shdr,
390 ".note", NULL);
391 if (sec)
392 break;
393
394 return err;
395
396 } while (0);
397
398 data = elf_getdata(sec, NULL);
399 if (data == NULL)
400 goto out;
401
402 ptr = data->d_buf;
403 while (ptr < (data->d_buf + data->d_size)) {
404 GElf_Nhdr *nhdr = ptr;
405 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
406 descsz = NOTE_ALIGN(nhdr->n_descsz);
407 const char *name;
408
409 ptr += sizeof(*nhdr);
410 name = ptr;
411 ptr += namesz;
412 if (nhdr->n_type == NT_GNU_BUILD_ID &&
413 nhdr->n_namesz == sizeof("GNU")) {
414 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
415 size_t sz = min(size, descsz);
416 memcpy(bf, ptr, sz);
417 memset(bf + sz, 0, size - sz);
418 err = descsz;
419 break;
420 }
421 }
422 ptr += descsz;
423 }
424
425out:
426 return err;
427}
428
429int filename__read_build_id(const char *filename, void *bf, size_t size)
430{
431 int fd, err = -1;
432 Elf *elf;
433
434 if (size < BUILD_ID_SIZE)
435 goto out;
436
437 fd = open(filename, O_RDONLY);
438 if (fd < 0)
439 goto out;
440
441 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
442 if (elf == NULL) {
443 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
444 goto out_close;
445 }
446
447 err = elf_read_build_id(elf, bf, size);
448
449 elf_end(elf);
450out_close:
451 close(fd);
452out:
453 return err;
454}
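
/*
 * Minimal usage sketch (the path is illustrative); a return value > 0 is
 * the note descriptor size and bf then holds the build-id, zero-padded
 * or truncated to "size" bytes:
 *
 *      u8 bid[BUILD_ID_SIZE];
 *      int len = filename__read_build_id("/usr/lib64/libc.so.6", bid, sizeof(bid));
 *
 *      if (len > 0)
 *              pr_debug("got a %d byte build-id\n", len);
 */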
455
456int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
457{
458 int fd, err = -1;
459
460 if (size < BUILD_ID_SIZE)
461 goto out;
462
463 fd = open(filename, O_RDONLY);
464 if (fd < 0)
465 goto out;
466
467 while (1) {
468 char bf[BUFSIZ];
469 GElf_Nhdr nhdr;
470 size_t namesz, descsz;
471
472 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
473 break;
474
475 namesz = NOTE_ALIGN(nhdr.n_namesz);
476 descsz = NOTE_ALIGN(nhdr.n_descsz);
477 if (nhdr.n_type == NT_GNU_BUILD_ID &&
478 nhdr.n_namesz == sizeof("GNU")) {
479 if (read(fd, bf, namesz) != (ssize_t)namesz)
480 break;
481 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
482 size_t sz = min(descsz, size);
483 if (read(fd, build_id, sz) == (ssize_t)sz) {
484 memset(build_id + sz, 0, size - sz);
485 err = 0;
486 break;
487 }
488 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
489 break;
490 } else {
491 int n = namesz + descsz;
492 if (read(fd, bf, n) != n)
493 break;
494 }
495 }
496 close(fd);
497out:
498 return err;
499}
500
501int filename__read_debuglink(const char *filename, char *debuglink,
502 size_t size)
503{
504 int fd, err = -1;
505 Elf *elf;
506 GElf_Ehdr ehdr;
507 GElf_Shdr shdr;
508 Elf_Data *data;
509 Elf_Scn *sec;
510 Elf_Kind ek;
511
512 fd = open(filename, O_RDONLY);
513 if (fd < 0)
514 goto out;
515
516 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
517 if (elf == NULL) {
518 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
519 goto out_close;
520 }
521
522 ek = elf_kind(elf);
523 if (ek != ELF_K_ELF)
524 goto out_elf_end;
525
526 if (gelf_getehdr(elf, &ehdr) == NULL) {
527 pr_err("%s: cannot get elf header.\n", __func__);
528 goto out_elf_end;
529 }
530
531 sec = elf_section_by_name(elf, &ehdr, &shdr,
532 ".gnu_debuglink", NULL);
533 if (sec == NULL)
534 goto out_elf_end;
535
536 data = elf_getdata(sec, NULL);
537 if (data == NULL)
538 goto out_elf_end;
539
540 /* the start of this section is a zero-terminated string */
541 strncpy(debuglink, data->d_buf, size);
542
543 err = 0;
544
545out_elf_end:
546 elf_end(elf);
547out_close:
548 close(fd);
549out:
550 return err;
551}
552
553static int dso__swap_init(struct dso *dso, unsigned char eidata)
554{
555 static unsigned int const endian = 1;
556
557 dso->needs_swap = DSO_SWAP__NO;
558
559 switch (eidata) {
560 case ELFDATA2LSB:
561 /* DSO is little endian; swap only if we are big endian. */
562 if (*(unsigned char const *)&endian != 1)
563 dso->needs_swap = DSO_SWAP__YES;
564 break;
565
566 case ELFDATA2MSB:
567 /* DSO is big endian; swap only if we are little endian. */
568 if (*(unsigned char const *)&endian != 0)
569 dso->needs_swap = DSO_SWAP__YES;
570 break;
571
572 default:
573 pr_err("unrecognized DSO data encoding %d\n", eidata);
574 return -EINVAL;
575 }
576
577 return 0;
578}
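
/*
 * Illustrative effect: on a little-endian host reading an ELFDATA2MSB
 * object the above leaves needs_swap == DSO_SWAP__YES, so e.g. the
 * 64-bit .opd descriptor fetched via DSO__SWAP(dso, u64, *opd) further
 * down in dso__load_sym() gets its byte order flipped before use.
 */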
579
580static int decompress_kmodule(struct dso *dso, const char *name,
581 enum dso_binary_type type)
582{
583 int fd = -1;
584 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
585 struct kmod_path m;
586
587 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
588 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
589 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
590 return -1;
591
592 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
593 name = dso->long_name;
594
595 if (kmod_path__parse_ext(&m, name) || !m.comp)
596 return -1;
597
598 fd = mkstemp(tmpbuf);
599 if (fd < 0) {
600 dso->load_errno = errno;
601 goto out;
602 }
603
604 if (!decompress_to_file(m.ext, name, fd)) {
605 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
606 close(fd);
607 fd = -1;
608 }
609
610 unlink(tmpbuf);
611
612out:
613 free(m.ext);
614 return fd;
615}
616
617bool symsrc__possibly_runtime(struct symsrc *ss)
618{
619 return ss->dynsym || ss->opdsec;
620}
621
622bool symsrc__has_symtab(struct symsrc *ss)
623{
624 return ss->symtab != NULL;
625}
626
627void symsrc__destroy(struct symsrc *ss)
628{
629 zfree(&ss->name);
630 elf_end(ss->elf);
631 close(ss->fd);
632}
633
634bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
635{
636 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
637}
638
639int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
640 enum dso_binary_type type)
641{
642 int err = -1;
643 GElf_Ehdr ehdr;
644 Elf *elf;
645 int fd;
646
647 if (dso__needs_decompress(dso)) {
648 fd = decompress_kmodule(dso, name, type);
649 if (fd < 0)
650 return -1;
651 } else {
652 fd = open(name, O_RDONLY);
653 if (fd < 0) {
654 dso->load_errno = errno;
655 return -1;
656 }
657 }
658
659 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
660 if (elf == NULL) {
661 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
662 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
663 goto out_close;
664 }
665
666 if (gelf_getehdr(elf, &ehdr) == NULL) {
667 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
668 pr_debug("%s: cannot get elf header.\n", __func__);
669 goto out_elf_end;
670 }
671
672 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
673 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
674 goto out_elf_end;
675 }
676
677 /* Always reject images with a mismatched build-id: */
678 if (dso->has_build_id) {
679 u8 build_id[BUILD_ID_SIZE];
680
681 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
682 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
683 goto out_elf_end;
684 }
685
686 if (!dso__build_id_equal(dso, build_id)) {
687 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
688 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
689 goto out_elf_end;
690 }
691 }
692
693 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
694
695 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
696 NULL);
697 if (ss->symshdr.sh_type != SHT_SYMTAB)
698 ss->symtab = NULL;
699
700 ss->dynsym_idx = 0;
701 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
702 &ss->dynsym_idx);
703 if (ss->dynshdr.sh_type != SHT_DYNSYM)
704 ss->dynsym = NULL;
705
706 ss->opdidx = 0;
707 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
708 &ss->opdidx);
709 if (ss->opdshdr.sh_type != SHT_PROGBITS)
710 ss->opdsec = NULL;
711
712 if (dso->kernel == DSO_TYPE_USER) {
713 GElf_Shdr shdr;
714 ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
715 ehdr.e_type == ET_REL ||
716 dso__is_vdso(dso) ||
717 elf_section_by_name(elf, &ehdr, &shdr,
718 ".gnu.prelink_undo",
719 NULL) != NULL);
720 } else {
721 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
722 }
723
724 ss->name = strdup(name);
725 if (!ss->name) {
726 dso->load_errno = errno;
727 goto out_elf_end;
728 }
729
730 ss->elf = elf;
731 ss->fd = fd;
732 ss->ehdr = ehdr;
733 ss->type = type;
734
735 return 0;
736
737out_elf_end:
738 elf_end(elf);
739out_close:
740 close(fd);
741 return err;
742}
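
/*
 * Hedged usage sketch, roughly mirroring how dso__load() drives this
 * file; dso, map, name and filter come from the caller, and the binary
 * type below is just one possibility:
 *
 *      struct symsrc ss;
 *
 *      if (symsrc__init(&ss, dso, name, DSO_BINARY_TYPE__SYSTEM_PATH_DSO) == 0) {
 *              if (symsrc__has_symtab(&ss))
 *                      dso__load_sym(dso, map, &ss, &ss, filter, 0);
 *              symsrc__destroy(&ss);
 *      }
 */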
743
744/**
745 * ref_reloc_sym_not_found - is the kernel relocation symbol still unresolved.
746 * @kmap: kernel maps and relocation reference symbol
747 *
748 * This function returns %true if we are dealing with the kernel maps and the
749 * relocation reference symbol has not yet been found. Otherwise %false is
750 * returned.
751 */
752static bool ref_reloc_sym_not_found(struct kmap *kmap)
753{
754 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
755 !kmap->ref_reloc_sym->unrelocated_addr;
756}
757
758/**
759 * ref_reloc - kernel relocation offset.
760 * @kmap: kernel maps and relocation reference symbol
761 *
762 * This function returns the offset of kernel addresses as determined by using
763 * the relocation reference symbol i.e. if the kernel has not been relocated
764 * then the return value is zero.
765 */
766static u64 ref_reloc(struct kmap *kmap)
767{
768 if (kmap && kmap->ref_reloc_sym &&
769 kmap->ref_reloc_sym->unrelocated_addr)
770 return kmap->ref_reloc_sym->addr -
771 kmap->ref_reloc_sym->unrelocated_addr;
772 return 0;
773}
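
/*
 * Illustrative numbers: if kallsyms reports the reference symbol (e.g.
 * _stext) at 0xffffffff9a000000 while the ELF image has it at
 * 0xffffffff81000000, ref_reloc() returns 0x19000000, and that delta is
 * what dso__load_sym() folds into map->reloc and the kernel map start.
 */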
774
775static bool want_demangle(bool is_kernel_sym)
776{
777 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
778}
779
780void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
781
782int dso__load_sym(struct dso *dso, struct map *map,
783 struct symsrc *syms_ss, struct symsrc *runtime_ss,
784 symbol_filter_t filter, int kmodule)
785{
786 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
787 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
788 struct map *curr_map = map;
789 struct dso *curr_dso = dso;
790 Elf_Data *symstrs, *secstrs;
791 uint32_t nr_syms;
792 int err = -1;
793 uint32_t idx;
794 GElf_Ehdr ehdr;
795 GElf_Shdr shdr;
796 GElf_Shdr tshdr;
797 Elf_Data *syms, *opddata = NULL;
798 GElf_Sym sym;
799 Elf_Scn *sec, *sec_strndx;
800 Elf *elf;
801 int nr = 0;
802 bool remap_kernel = false, adjust_kernel_syms = false;
803
804 if (kmap && !kmaps)
805 return -1;
806
807 dso->symtab_type = syms_ss->type;
808 dso->is_64_bit = syms_ss->is_64_bit;
809 dso->rel = syms_ss->ehdr.e_type == ET_REL;
810
811 /*
812 * Modules may already have symbols from kallsyms, but those symbols
813 * have the wrong values for the dso maps, so remove them.
814 */
815 if (kmodule && syms_ss->symtab)
816 symbols__delete(&dso->symbols[map->type]);
817
818 if (!syms_ss->symtab) {
819 /*
820 * If the vmlinux is stripped, fail so we will fall back
821 * to using kallsyms. The vmlinux runtime symbols aren't
822 * of much use.
823 */
824 if (dso->kernel)
825 goto out_elf_end;
826
827 syms_ss->symtab = syms_ss->dynsym;
828 syms_ss->symshdr = syms_ss->dynshdr;
829 }
830
831 elf = syms_ss->elf;
832 ehdr = syms_ss->ehdr;
833 sec = syms_ss->symtab;
834 shdr = syms_ss->symshdr;
835
836 if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
837 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
838
839 if (runtime_ss->opdsec)
840 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
841
842 syms = elf_getdata(sec, NULL);
843 if (syms == NULL)
844 goto out_elf_end;
845
846 sec = elf_getscn(elf, shdr.sh_link);
847 if (sec == NULL)
848 goto out_elf_end;
849
850 symstrs = elf_getdata(sec, NULL);
851 if (symstrs == NULL)
852 goto out_elf_end;
853
854 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
855 if (sec_strndx == NULL)
856 goto out_elf_end;
857
858 secstrs = elf_getdata(sec_strndx, NULL);
859 if (secstrs == NULL)
860 goto out_elf_end;
861
862 nr_syms = shdr.sh_size / shdr.sh_entsize;
863
864 memset(&sym, 0, sizeof(sym));
865
866 /*
867 * The kernel relocation symbol is needed in advance in order to adjust
868 * kernel maps correctly.
869 */
870 if (ref_reloc_sym_not_found(kmap)) {
871 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
872 const char *elf_name = elf_sym__name(&sym, symstrs);
873
874 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
875 continue;
876 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
877 map->reloc = kmap->ref_reloc_sym->addr -
878 kmap->ref_reloc_sym->unrelocated_addr;
879 break;
880 }
881 }
882
883 /*
884 * Handle any relocation of vdso necessary because older kernels
885 * attempted to prelink vdso to its virtual address.
886 */
887 if (dso__is_vdso(dso))
888 map->reloc = map->start - dso->text_offset;
889
890 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
891 /*
892 * Initial kernel and module mappings do not map to the dso. For
893 * function mappings, flag the fixups.
894 */
895 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
896 remap_kernel = true;
897 adjust_kernel_syms = dso->adjust_symbols;
898 }
899 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
900 struct symbol *f;
901 const char *elf_name = elf_sym__name(&sym, symstrs);
902 char *demangled = NULL;
903 int is_label = elf_sym__is_label(&sym);
904 const char *section_name;
905 bool used_opd = false;
906
907 if (!is_label && !elf_sym__is_a(&sym, map->type))
908 continue;
909
910 /* Reject ARM ELF "mapping symbols": these aren't unique and
911 * don't identify functions, so they would confuse the profile
912 * output. */
913 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
914 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
915 && (elf_name[2] == '\0' || elf_name[2] == '.'))
916 continue;
917 }
918
919 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
920 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
921 u64 *opd = opddata->d_buf + offset;
922 sym.st_value = DSO__SWAP(dso, u64, *opd);
923 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
924 sym.st_value);
925 used_opd = true;
926 }
927 /*
928 * When loading symbols in a data mapping, ABS symbols (which
929 * have SHN_ABS in their st_shndx) make elf_getscn() fail,
930 * which marks the loading as a failure, so the symbols loaded
931 * so far cannot be fixed up.
932 *
933 * I'm not sure what should be done. Just ignore them for now.
934 * - Namhyung Kim
935 */
936 if (sym.st_shndx == SHN_ABS)
937 continue;
938
939 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
940 if (!sec)
941 goto out_elf_end;
942
943 gelf_getshdr(sec, &shdr);
944
945 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
946 continue;
947
948 section_name = elf_sec__name(&shdr, secstrs);
949
950 /* On ARM, symbols for thumb functions have 1 added to
951 * the symbol address as a flag - remove it */
952 if ((ehdr.e_machine == EM_ARM) &&
953 (map->type == MAP__FUNCTION) &&
954 (sym.st_value & 1))
955 --sym.st_value;
956
957 arch__elf_sym_adjust(&sym);
958
959 if (dso->kernel || kmodule) {
960 char dso_name[PATH_MAX];
961
962 /* Adjust symbol to map to file offset */
963 if (adjust_kernel_syms)
964 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
965
966 if (strcmp(section_name,
967 (curr_dso->short_name +
968 dso->short_name_len)) == 0)
969 goto new_symbol;
970
971 if (strcmp(section_name, ".text") == 0) {
972 /*
973 * The initial kernel mapping is based on
974 * kallsyms and identity maps. Overwrite it to
975 * map to the kernel dso.
976 */
977 if (remap_kernel && dso->kernel) {
978 remap_kernel = false;
979 map->start = shdr.sh_addr +
980 ref_reloc(kmap);
981 map->end = map->start + shdr.sh_size;
982 map->pgoff = shdr.sh_offset;
983 map->map_ip = map__map_ip;
984 map->unmap_ip = map__unmap_ip;
985 /* Ensure maps are correctly ordered */
986 if (kmaps) {
987 map__get(map);
988 map_groups__remove(kmaps, map);
989 map_groups__insert(kmaps, map);
990 map__put(map);
991 }
992 }
993
994 /*
995 * The initial module mapping is based on
996 * /proc/modules mapped to offset zero.
997 * Overwrite it to map to the module dso.
998 */
999 if (remap_kernel && kmodule) {
1000 remap_kernel = false;
1001 map->pgoff = shdr.sh_offset;
1002 }
1003
1004 curr_map = map;
1005 curr_dso = dso;
1006 goto new_symbol;
1007 }
1008
1009 if (!kmap)
1010 goto new_symbol;
1011
1012 snprintf(dso_name, sizeof(dso_name),
1013 "%s%s", dso->short_name, section_name);
1014
1015 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
1016 if (curr_map == NULL) {
1017 u64 start = sym.st_value;
1018
1019 if (kmodule)
1020 start += map->start + shdr.sh_offset;
1021
1022 curr_dso = dso__new(dso_name);
1023 if (curr_dso == NULL)
1024 goto out_elf_end;
1025 curr_dso->kernel = dso->kernel;
1026 curr_dso->long_name = dso->long_name;
1027 curr_dso->long_name_len = dso->long_name_len;
1028 curr_map = map__new2(start, curr_dso,
1029 map->type);
1030 dso__put(curr_dso);
1031 if (curr_map == NULL) {
1032 goto out_elf_end;
1033 }
1034 if (adjust_kernel_syms) {
1035 curr_map->start = shdr.sh_addr +
1036 ref_reloc(kmap);
1037 curr_map->end = curr_map->start +
1038 shdr.sh_size;
1039 curr_map->pgoff = shdr.sh_offset;
1040 } else {
1041 curr_map->map_ip = identity__map_ip;
1042 curr_map->unmap_ip = identity__map_ip;
1043 }
1044 curr_dso->symtab_type = dso->symtab_type;
1045 map_groups__insert(kmaps, curr_map);
1046 /*
1047 * Add it before we drop the reference to curr_map,
1048 * i.e. while we still are sure to have a reference
1049 * to this DSO via curr_map->dso.
1050 */
1051 dsos__add(&map->groups->machine->dsos, curr_dso);
1052 /* kmaps already got it */
1053 map__put(curr_map);
1054 dso__set_loaded(curr_dso, map->type);
1055 } else
1056 curr_dso = curr_map->dso;
1057
1058 goto new_symbol;
1059 }
1060
1061 if ((used_opd && runtime_ss->adjust_symbols)
1062 || (!used_opd && syms_ss->adjust_symbols)) {
1063 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1064 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1065 (u64)sym.st_value, (u64)shdr.sh_addr,
1066 (u64)shdr.sh_offset);
1067 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1068 }
1069new_symbol:
1070 /*
1071 * We need to figure out if the object was created from C++ sources.
1072 * DWARF DW_compile_unit has this, but we don't always have access
1073 * to it...
1074 */
1075 if (want_demangle(dso->kernel || kmodule)) {
1076 int demangle_flags = DMGL_NO_OPTS;
1077 if (verbose)
1078 demangle_flags = DMGL_PARAMS | DMGL_ANSI;
1079
1080 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
1081 if (demangled == NULL)
1082 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
1083 if (demangled != NULL)
1084 elf_name = demangled;
1085 }
1086 f = symbol__new(sym.st_value, sym.st_size,
1087 GELF_ST_BIND(sym.st_info), elf_name);
1088 free(demangled);
1089 if (!f)
1090 goto out_elf_end;
1091
1092 if (filter && filter(curr_map, f))
1093 symbol__delete(f);
1094 else {
1095 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1096 nr++;
1097 }
1098 }
1099
1100 /*
1101 * For misannotated, zeroed, ASM function sizes.
1102 */
1103 if (nr > 0) {
1104 if (!symbol_conf.allow_aliases)
1105 symbols__fixup_duplicate(&dso->symbols[map->type]);
1106 symbols__fixup_end(&dso->symbols[map->type]);
1107 if (kmap) {
1108 /*
1109 * We need to fixup this here too because we create new
1110 * maps here, for things like vsyscall sections.
1111 */
1112 __map_groups__fixup_end(kmaps, map->type);
1113 }
1114 }
1115 err = nr;
1116out_elf_end:
1117 return err;
1118}
1119
1120static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1121{
1122 GElf_Phdr phdr;
1123 size_t i, phdrnum;
1124 int err;
1125 u64 sz;
1126
1127 if (elf_getphdrnum(elf, &phdrnum))
1128 return -1;
1129
1130 for (i = 0; i < phdrnum; i++) {
1131 if (gelf_getphdr(elf, i, &phdr) == NULL)
1132 return -1;
1133 if (phdr.p_type != PT_LOAD)
1134 continue;
1135 if (exe) {
1136 if (!(phdr.p_flags & PF_X))
1137 continue;
1138 } else {
1139 if (!(phdr.p_flags & PF_R))
1140 continue;
1141 }
1142 sz = min(phdr.p_memsz, phdr.p_filesz);
1143 if (!sz)
1144 continue;
1145 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1146 if (err)
1147 return err;
1148 }
1149 return 0;
1150}
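
/*
 * Sketch of a mapfn_t callback as consumed above (the function itself is
 * illustrative, not part of perf):
 *
 *      static int print_load_seg(u64 start, u64 len, u64 pgoff, void *data __maybe_unused)
 *      {
 *              pr_debug("PT_LOAD vaddr %#" PRIx64 " len %#" PRIx64 " off %#" PRIx64 "\n",
 *                       start, len, pgoff);
 *              return 0;
 *      }
 *
 * A caller would then pass it as: file__read_maps(fd, true, print_load_seg, NULL, NULL).
 */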
1151
1152int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1153 bool *is_64_bit)
1154{
1155 int err;
1156 Elf *elf;
1157
1158 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1159 if (elf == NULL)
1160 return -1;
1161
1162 if (is_64_bit)
1163 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1164
1165 err = elf_read_maps(elf, exe, mapfn, data);
1166
1167 elf_end(elf);
1168 return err;
1169}
1170
1171enum dso_type dso__type_fd(int fd)
1172{
1173 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1174 GElf_Ehdr ehdr;
1175 Elf_Kind ek;
1176 Elf *elf;
1177
1178 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1179 if (elf == NULL)
1180 goto out;
1181
1182 ek = elf_kind(elf);
1183 if (ek != ELF_K_ELF)
1184 goto out_end;
1185
1186 if (gelf_getclass(elf) == ELFCLASS64) {
1187 dso_type = DSO__TYPE_64BIT;
1188 goto out_end;
1189 }
1190
1191 if (gelf_getehdr(elf, &ehdr) == NULL)
1192 goto out_end;
1193
1194 if (ehdr.e_machine == EM_X86_64)
1195 dso_type = DSO__TYPE_X32BIT;
1196 else
1197 dso_type = DSO__TYPE_32BIT;
1198out_end:
1199 elf_end(elf);
1200out:
1201 return dso_type;
1202}
1203
1204static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1205{
1206 ssize_t r;
1207 size_t n;
1208 int err = -1;
1209 char *buf = malloc(page_size);
1210
1211 if (buf == NULL)
1212 return -1;
1213
1214 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1215 goto out;
1216
1217 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1218 goto out;
1219
1220 while (len) {
1221 n = page_size;
1222 if (len < n)
1223 n = len;
1224 /* Use read because mmap won't work on proc files */
1225 r = read(from, buf, n);
1226 if (r < 0)
1227 goto out;
1228 if (!r)
1229 break;
1230 n = r;
1231 r = write(to, buf, n);
1232 if (r < 0)
1233 goto out;
1234 if ((size_t)r != n)
1235 goto out;
1236 len -= n;
1237 }
1238
1239 err = 0;
1240out:
1241 free(buf);
1242 return err;
1243}
1244
1245struct kcore {
1246 int fd;
1247 int elfclass;
1248 Elf *elf;
1249 GElf_Ehdr ehdr;
1250};
1251
1252static int kcore__open(struct kcore *kcore, const char *filename)
1253{
1254 GElf_Ehdr *ehdr;
1255
1256 kcore->fd = open(filename, O_RDONLY);
1257 if (kcore->fd == -1)
1258 return -1;
1259
1260 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1261 if (!kcore->elf)
1262 goto out_close;
1263
1264 kcore->elfclass = gelf_getclass(kcore->elf);
1265 if (kcore->elfclass == ELFCLASSNONE)
1266 goto out_end;
1267
1268 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1269 if (!ehdr)
1270 goto out_end;
1271
1272 return 0;
1273
1274out_end:
1275 elf_end(kcore->elf);
1276out_close:
1277 close(kcore->fd);
1278 return -1;
1279}
1280
1281static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1282 bool temp)
1283{
1284 kcore->elfclass = elfclass;
1285
1286 if (temp)
1287 kcore->fd = mkstemp(filename);
1288 else
1289 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1290 if (kcore->fd == -1)
1291 return -1;
1292
1293 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1294 if (!kcore->elf)
1295 goto out_close;
1296
1297 if (!gelf_newehdr(kcore->elf, elfclass))
1298 goto out_end;
1299
1300 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
1301
1302 return 0;
1303
1304out_end:
1305 elf_end(kcore->elf);
1306out_close:
1307 close(kcore->fd);
1308 unlink(filename);
1309 return -1;
1310}
1311
1312static void kcore__close(struct kcore *kcore)
1313{
1314 elf_end(kcore->elf);
1315 close(kcore->fd);
1316}
1317
1318static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1319{
1320 GElf_Ehdr *ehdr = &to->ehdr;
1321 GElf_Ehdr *kehdr = &from->ehdr;
1322
1323 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1324 ehdr->e_type = kehdr->e_type;
1325 ehdr->e_machine = kehdr->e_machine;
1326 ehdr->e_version = kehdr->e_version;
1327 ehdr->e_entry = 0;
1328 ehdr->e_shoff = 0;
1329 ehdr->e_flags = kehdr->e_flags;
1330 ehdr->e_phnum = count;
1331 ehdr->e_shentsize = 0;
1332 ehdr->e_shnum = 0;
1333 ehdr->e_shstrndx = 0;
1334
1335 if (from->elfclass == ELFCLASS32) {
1336 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1337 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1338 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1339 } else {
1340 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1341 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1342 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1343 }
1344
1345 if (!gelf_update_ehdr(to->elf, ehdr))
1346 return -1;
1347
1348 if (!gelf_newphdr(to->elf, count))
1349 return -1;
1350
1351 return 0;
1352}
1353
1354static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1355 u64 addr, u64 len)
1356{
1357 GElf_Phdr phdr = {
1358 .p_type = PT_LOAD,
1359 .p_flags = PF_R | PF_W | PF_X,
1360 .p_offset = offset,
1361 .p_vaddr = addr,
1362 .p_paddr = 0,
1363 .p_filesz = len,
1364 .p_memsz = len,
1365 .p_align = page_size,
1366 };
1367
1368 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
1369 return -1;
1370
1371 return 0;
1372}
1373
1374static off_t kcore__write(struct kcore *kcore)
1375{
1376 return elf_update(kcore->elf, ELF_C_WRITE);
1377}
1378
1379struct phdr_data {
1380 off_t offset;
1381 u64 addr;
1382 u64 len;
1383};
1384
1385struct kcore_copy_info {
1386 u64 stext;
1387 u64 etext;
1388 u64 first_symbol;
1389 u64 last_symbol;
1390 u64 first_module;
1391 u64 last_module_symbol;
1392 struct phdr_data kernel_map;
1393 struct phdr_data modules_map;
1394};
1395
1396static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1397 u64 start)
1398{
1399 struct kcore_copy_info *kci = arg;
1400
1401 if (!symbol_type__is_a(type, MAP__FUNCTION))
1402 return 0;
1403
1404 if (strchr(name, '[')) {
1405 if (start > kci->last_module_symbol)
1406 kci->last_module_symbol = start;
1407 return 0;
1408 }
1409
1410 if (!kci->first_symbol || start < kci->first_symbol)
1411 kci->first_symbol = start;
1412
1413 if (!kci->last_symbol || start > kci->last_symbol)
1414 kci->last_symbol = start;
1415
1416 if (!strcmp(name, "_stext")) {
1417 kci->stext = start;
1418 return 0;
1419 }
1420
1421 if (!strcmp(name, "_etext")) {
1422 kci->etext = start;
1423 return 0;
1424 }
1425
1426 return 0;
1427}
1428
1429static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1430 const char *dir)
1431{
1432 char kallsyms_filename[PATH_MAX];
1433
1434 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1435
1436 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1437 return -1;
1438
1439 if (kallsyms__parse(kallsyms_filename, kci,
1440 kcore_copy__process_kallsyms) < 0)
1441 return -1;
1442
1443 return 0;
1444}
1445
1446static int kcore_copy__process_modules(void *arg,
1447 const char *name __maybe_unused,
1448 u64 start)
1449{
1450 struct kcore_copy_info *kci = arg;
1451
1452 if (!kci->first_module || start < kci->first_module)
1453 kci->first_module = start;
1454
1455 return 0;
1456}
1457
1458static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1459 const char *dir)
1460{
1461 char modules_filename[PATH_MAX];
1462
1463 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1464
1465 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1466 return -1;
1467
1468 if (modules__parse(modules_filename, kci,
1469 kcore_copy__process_modules) < 0)
1470 return -1;
1471
1472 return 0;
1473}
1474
1475static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1476 u64 s, u64 e)
1477{
1478 if (p->addr || s < start || s >= end)
1479 return;
1480
1481 p->addr = s;
1482 p->offset = (s - start) + pgoff;
1483 p->len = e < end ? e - s : end - s;
1484}
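
/*
 * Illustrative arithmetic for the mapping above: given a PT_LOAD segment
 * starting at 0xffffffff81000000 with pgoff 0x1000, and a _stext of
 * s == 0xffffffff81002000, the recorded file offset is
 * (s - start) + pgoff == 0x3000, and p->len is clamped to the segment end.
 */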
1485
1486static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1487{
1488 struct kcore_copy_info *kci = data;
1489 u64 end = start + len;
1490
1491 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1492 kci->etext);
1493
1494 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1495 kci->last_module_symbol);
1496
1497 return 0;
1498}
1499
1500static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1501{
1502 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1503 return -1;
1504
1505 return 0;
1506}
1507
1508static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1509 Elf *elf)
1510{
1511 if (kcore_copy__parse_kallsyms(kci, dir))
1512 return -1;
1513
1514 if (kcore_copy__parse_modules(kci, dir))
1515 return -1;
1516
1517 if (kci->stext)
1518 kci->stext = round_down(kci->stext, page_size);
1519 else
1520 kci->stext = round_down(kci->first_symbol, page_size);
1521
1522 if (kci->etext) {
1523 kci->etext = round_up(kci->etext, page_size);
1524 } else if (kci->last_symbol) {
1525 kci->etext = round_up(kci->last_symbol, page_size);
1526 kci->etext += page_size;
1527 }
1528
1529 kci->first_module = round_down(kci->first_module, page_size);
1530
1531 if (kci->last_module_symbol) {
1532 kci->last_module_symbol = round_up(kci->last_module_symbol,
1533 page_size);
1534 kci->last_module_symbol += page_size;
1535 }
1536
1537 if (!kci->stext || !kci->etext)
1538 return -1;
1539
1540 if (kci->first_module && !kci->last_module_symbol)
1541 return -1;
1542
1543 return kcore_copy__read_maps(kci, elf);
1544}
1545
1546static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1547 const char *name)
1548{
1549 char from_filename[PATH_MAX];
1550 char to_filename[PATH_MAX];
1551
1552 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1553 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1554
1555 return copyfile_mode(from_filename, to_filename, 0400);
1556}
1557
1558static int kcore_copy__unlink(const char *dir, const char *name)
1559{
1560 char filename[PATH_MAX];
1561
1562 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1563
1564 return unlink(filename);
1565}
1566
1567static int kcore_copy__compare_fds(int from, int to)
1568{
1569 char *buf_from;
1570 char *buf_to;
1571 ssize_t ret;
1572 size_t len;
1573 int err = -1;
1574
1575 buf_from = malloc(page_size);
1576 buf_to = malloc(page_size);
1577 if (!buf_from || !buf_to)
1578 goto out;
1579
1580 while (1) {
1581 /* Use read because mmap won't work on proc files */
1582 ret = read(from, buf_from, page_size);
1583 if (ret < 0)
1584 goto out;
1585
1586 if (!ret)
1587 break;
1588
1589 len = ret;
1590
1591 if (readn(to, buf_to, len) != (int)len)
1592 goto out;
1593
1594 if (memcmp(buf_from, buf_to, len))
1595 goto out;
1596 }
1597
1598 err = 0;
1599out:
1600 free(buf_to);
1601 free(buf_from);
1602 return err;
1603}
1604
1605static int kcore_copy__compare_files(const char *from_filename,
1606 const char *to_filename)
1607{
1608 int from, to, err = -1;
1609
1610 from = open(from_filename, O_RDONLY);
1611 if (from < 0)
1612 return -1;
1613
1614 to = open(to_filename, O_RDONLY);
1615 if (to < 0)
1616 goto out_close_from;
1617
1618 err = kcore_copy__compare_fds(from, to);
1619
1620 close(to);
1621out_close_from:
1622 close(from);
1623 return err;
1624}
1625
1626static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1627 const char *name)
1628{
1629 char from_filename[PATH_MAX];
1630 char to_filename[PATH_MAX];
1631
1632 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1633 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1634
1635 return kcore_copy__compare_files(from_filename, to_filename);
1636}
1637
1638/**
1639 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1640 * @from_dir: from directory
1641 * @to_dir: to directory
1642 *
1643 * This function copies kallsyms, modules and kcore files from one directory to
1644 * another. kallsyms and modules are copied entirely. Only code segments are
1645 * copied from kcore. It is assumed that two segments suffice: one for the
1646 * kernel proper and one for all the modules. The code segments are determined
1647 * from kallsyms and modules files. The kernel map starts at _stext or the
1648 * lowest function symbol, and ends at _etext or the highest function symbol.
1649 * The module map starts at the lowest module address and ends at the highest
1650 * module symbol. Start addresses are rounded down to the nearest page. End
1651 * addresses are rounded up to the nearest page. An extra page is added to the
1652 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1653 * symbol too. Because it contains only code sections, the resulting kcore is
1654 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1655 * is not the same for the kernel map and the modules map. That happens because
1656 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1657 * kallsyms and modules files are compared with their copies to check that
1658 * modules have not been loaded or unloaded while the copies were taking place.
1659 *
1660 * Return: %0 on success, %-1 on failure.
1661 */
1662int kcore_copy(const char *from_dir, const char *to_dir)
1663{
1664 struct kcore kcore;
1665 struct kcore extract;
1666 size_t count = 2;
1667 int idx = 0, err = -1;
1668 off_t offset = page_size, sz, modules_offset = 0;
1669 struct kcore_copy_info kci = { .stext = 0, };
1670 char kcore_filename[PATH_MAX];
1671 char extract_filename[PATH_MAX];
1672
1673 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1674 return -1;
1675
1676 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1677 goto out_unlink_kallsyms;
1678
1679 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1680 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1681
1682 if (kcore__open(&kcore, kcore_filename))
1683 goto out_unlink_modules;
1684
1685 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1686 goto out_kcore_close;
1687
1688 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1689 goto out_kcore_close;
1690
1691 if (!kci.modules_map.addr)
1692 count -= 1;
1693
1694 if (kcore__copy_hdr(&kcore, &extract, count))
1695 goto out_extract_close;
1696
1697 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1698 kci.kernel_map.len))
1699 goto out_extract_close;
1700
1701 if (kci.modules_map.addr) {
1702 modules_offset = offset + kci.kernel_map.len;
1703 if (kcore__add_phdr(&extract, idx, modules_offset,
1704 kci.modules_map.addr, kci.modules_map.len))
1705 goto out_extract_close;
1706 }
1707
1708 sz = kcore__write(&extract);
1709 if (sz < 0 || sz > offset)
1710 goto out_extract_close;
1711
1712 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1713 kci.kernel_map.len))
1714 goto out_extract_close;
1715
1716 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1717 extract.fd, modules_offset,
1718 kci.modules_map.len))
1719 goto out_extract_close;
1720
1721 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1722 goto out_extract_close;
1723
1724 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1725 goto out_extract_close;
1726
1727 err = 0;
1728
1729out_extract_close:
1730 kcore__close(&extract);
1731 if (err)
1732 unlink(extract_filename);
1733out_kcore_close:
1734 kcore__close(&kcore);
1735out_unlink_modules:
1736 if (err)
1737 kcore_copy__unlink(to_dir, "modules");
1738out_unlink_kallsyms:
1739 if (err)
1740 kcore_copy__unlink(to_dir, "kallsyms");
1741
1742 return err;
1743}
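
/*
 * Minimal usage sketch (the destination directory is illustrative; perf
 * normally points this at a directory inside the build-id cache):
 *
 *      if (kcore_copy("/proc", "/tmp/kcore_dir") == 0)
 *              pr_debug("copied kallsyms, modules and a code-only kcore\n");
 */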
1744
1745int kcore_extract__create(struct kcore_extract *kce)
1746{
1747 struct kcore kcore;
1748 struct kcore extract;
1749 size_t count = 1;
1750 int idx = 0, err = -1;
1751 off_t offset = page_size, sz;
1752
1753 if (kcore__open(&kcore, kce->kcore_filename))
1754 return -1;
1755
1756 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1757 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1758 goto out_kcore_close;
1759
1760 if (kcore__copy_hdr(&kcore, &extract, count))
1761 goto out_extract_close;
1762
1763 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1764 goto out_extract_close;
1765
1766 sz = kcore__write(&extract);
1767 if (sz < 0 || sz > offset)
1768 goto out_extract_close;
1769
1770 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1771 goto out_extract_close;
1772
1773 err = 0;
1774
1775out_extract_close:
1776 kcore__close(&extract);
1777 if (err)
1778 unlink(kce->extract_filename);
1779out_kcore_close:
1780 kcore__close(&kcore);
1781
1782 return err;
1783}
1784
1785void kcore_extract__delete(struct kcore_extract *kce)
1786{
1787 unlink(kce->extract_filename);
1788}
1789
1790void symbol__elf_init(void)
1791{
1792 elf_version(EV_CURRENT);
1793}
1// SPDX-License-Identifier: GPL-2.0
2#include <fcntl.h>
3#include <stdio.h>
4#include <errno.h>
5#include <stdlib.h>
6#include <string.h>
7#include <unistd.h>
8#include <inttypes.h>
9
10#include "dso.h"
11#include "map.h"
12#include "maps.h"
13#include "symbol.h"
14#include "symsrc.h"
15#include "demangle-ocaml.h"
16#include "demangle-java.h"
17#include "demangle-rust.h"
18#include "machine.h"
19#include "vdso.h"
20#include "debug.h"
21#include "util/copyfile.h"
22#include <linux/ctype.h>
23#include <linux/kernel.h>
24#include <linux/zalloc.h>
25#include <symbol/kallsyms.h>
26#include <internal/lib.h>
27
28#ifndef EM_AARCH64
29#define EM_AARCH64 183 /* ARM 64 bit */
30#endif
31
32#ifndef ELF32_ST_VISIBILITY
33#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
34#endif
35
36/* For ELF64 the definitions are the same. */
37#ifndef ELF64_ST_VISIBILITY
38#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
39#endif
40
41/* How to extract information held in the st_other field. */
42#ifndef GELF_ST_VISIBILITY
43#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
44#endif
45
46typedef Elf64_Nhdr GElf_Nhdr;
47
48#ifndef DMGL_PARAMS
49#define DMGL_NO_OPTS 0 /* For readability... */
50#define DMGL_PARAMS (1 << 0) /* Include function args */
51#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
52#endif
53
54#ifdef HAVE_LIBBFD_SUPPORT
55#define PACKAGE 'perf'
56#include <bfd.h>
57#else
58#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
59extern char *cplus_demangle(const char *, int);
60
61static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
62{
63 return cplus_demangle(c, i);
64}
65#else
66#ifdef NO_DEMANGLE
67static inline char *bfd_demangle(void __maybe_unused *v,
68 const char __maybe_unused *c,
69 int __maybe_unused i)
70{
71 return NULL;
72}
73#endif
74#endif
75#endif
76
77#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
78static int elf_getphdrnum(Elf *elf, size_t *dst)
79{
80 GElf_Ehdr gehdr;
81 GElf_Ehdr *ehdr;
82
83 ehdr = gelf_getehdr(elf, &gehdr);
84 if (!ehdr)
85 return -1;
86
87 *dst = ehdr->e_phnum;
88
89 return 0;
90}
91#endif
92
93#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
94static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
95{
96 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
97 return -1;
98}
99#endif
100
101#ifndef NT_GNU_BUILD_ID
102#define NT_GNU_BUILD_ID 3
103#endif
104
105/**
106 * elf_symtab__for_each_symbol - iterate through all the symbols
107 * @syms: Elf_Data with the symbol table to iterate over
108 * @nr_syms: number of symbols in @syms
109 * @idx: uint32_t index iterator
110 * @sym: GElf_Sym iterator
111 */
112#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
113 for (idx = 0, gelf_getsym(syms, idx, &sym);\
114 idx < nr_syms; \
115 idx++, gelf_getsym(syms, idx, &sym))
116
117static inline uint8_t elf_sym__type(const GElf_Sym *sym)
118{
119 return GELF_ST_TYPE(sym->st_info);
120}
121
122static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
123{
124 return GELF_ST_VISIBILITY(sym->st_other);
125}
126
127#ifndef STT_GNU_IFUNC
128#define STT_GNU_IFUNC 10
129#endif
130
131static inline int elf_sym__is_function(const GElf_Sym *sym)
132{
133 return (elf_sym__type(sym) == STT_FUNC ||
134 elf_sym__type(sym) == STT_GNU_IFUNC) &&
135 sym->st_name != 0 &&
136 sym->st_shndx != SHN_UNDEF;
137}
138
139static inline bool elf_sym__is_object(const GElf_Sym *sym)
140{
141 return elf_sym__type(sym) == STT_OBJECT &&
142 sym->st_name != 0 &&
143 sym->st_shndx != SHN_UNDEF;
144}
145
146static inline int elf_sym__is_label(const GElf_Sym *sym)
147{
148 return elf_sym__type(sym) == STT_NOTYPE &&
149 sym->st_name != 0 &&
150 sym->st_shndx != SHN_UNDEF &&
151 sym->st_shndx != SHN_ABS &&
152 elf_sym__visibility(sym) != STV_HIDDEN &&
153 elf_sym__visibility(sym) != STV_INTERNAL;
154}
155
156static bool elf_sym__filter(GElf_Sym *sym)
157{
158 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
159}
160
161static inline const char *elf_sym__name(const GElf_Sym *sym,
162 const Elf_Data *symstrs)
163{
164 return symstrs->d_buf + sym->st_name;
165}
166
167static inline const char *elf_sec__name(const GElf_Shdr *shdr,
168 const Elf_Data *secstrs)
169{
170 return secstrs->d_buf + shdr->sh_name;
171}
172
173static inline int elf_sec__is_text(const GElf_Shdr *shdr,
174 const Elf_Data *secstrs)
175{
176 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
177}
178
179static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
180 const Elf_Data *secstrs)
181{
182 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
183}
184
185static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
186{
187 return elf_sec__is_text(shdr, secstrs) ||
188 elf_sec__is_data(shdr, secstrs);
189}
190
191static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
192{
193 Elf_Scn *sec = NULL;
194 GElf_Shdr shdr;
195 size_t cnt = 1;
196
197 while ((sec = elf_nextscn(elf, sec)) != NULL) {
198 gelf_getshdr(sec, &shdr);
199
200 if ((addr >= shdr.sh_addr) &&
201 (addr < (shdr.sh_addr + shdr.sh_size)))
202 return cnt;
203
204 ++cnt;
205 }
206
207 return -1;
208}
209
210Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
211 GElf_Shdr *shp, const char *name, size_t *idx)
212{
213 Elf_Scn *sec = NULL;
214 size_t cnt = 1;
215
216 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
217 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
218 return NULL;
219
220 while ((sec = elf_nextscn(elf, sec)) != NULL) {
221 char *str;
222
223 gelf_getshdr(sec, shp);
224 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
225 if (str && !strcmp(name, str)) {
226 if (idx)
227 *idx = cnt;
228 return sec;
229 }
230 ++cnt;
231 }
232
233 return NULL;
234}
235
236static bool want_demangle(bool is_kernel_sym)
237{
238 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
239}
240
241static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
242{
243 int demangle_flags = verbose > 0 ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS;
244 char *demangled = NULL;
245
246 /*
247 * We need to figure out if the object was created from C++ sources.
248 * DWARF DW_compile_unit has this, but we don't always have access
249 * to it...
250 */
251 if (!want_demangle(dso->kernel || kmodule))
252 return demangled;
253
254 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
255 if (demangled == NULL) {
256 demangled = ocaml_demangle_sym(elf_name);
257 if (demangled == NULL) {
258 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
259 }
260 }
261 else if (rust_is_mangled(demangled))
262 /*
263 * Input to Rust demangling is the BFD-demangled
264 * name, which is then Rust-demangled in place.
265 */
266 rust_demangle_sym(demangled);
267
268 return demangled;
269}
270
271#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
272 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
273 idx < nr_entries; \
274 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
275
276#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
277 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
278 idx < nr_entries; \
279 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
280
281/*
282 * We need to check if we have a .dynsym, so that we can handle the
283 * .plt, synthesizing its symbols, which aren't in either symbol table
284 * (.dynsym or .symtab).
285 * Always look at the original dso, not at debuginfo packages, which
286 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
287 */
288int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
289{
290 uint32_t nr_rel_entries, idx;
291 GElf_Sym sym;
292 u64 plt_offset, plt_header_size, plt_entry_size;
293 GElf_Shdr shdr_plt;
294 struct symbol *f;
295 GElf_Shdr shdr_rel_plt, shdr_dynsym;
296 Elf_Data *reldata, *syms, *symstrs;
297 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
298 size_t dynsym_idx;
299 GElf_Ehdr ehdr;
300 char sympltname[1024];
301 Elf *elf;
302 int nr = 0, symidx, err = 0;
303
304 if (!ss->dynsym)
305 return 0;
306
307 elf = ss->elf;
308 ehdr = ss->ehdr;
309
310 scn_dynsym = ss->dynsym;
311 shdr_dynsym = ss->dynshdr;
312 dynsym_idx = ss->dynsym_idx;
313
314 if (scn_dynsym == NULL)
315 goto out_elf_end;
316
317 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
318 ".rela.plt", NULL);
319 if (scn_plt_rel == NULL) {
320 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
321 ".rel.plt", NULL);
322 if (scn_plt_rel == NULL)
323 goto out_elf_end;
324 }
325
326 err = -1;
327
328 if (shdr_rel_plt.sh_link != dynsym_idx)
329 goto out_elf_end;
330
331 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
332 goto out_elf_end;
333
334 /*
335 * Fetch the relocation section to find the idxes to the GOT
336 * and the symbols in the .dynsym they refer to.
337 */
338 reldata = elf_getdata(scn_plt_rel, NULL);
339 if (reldata == NULL)
340 goto out_elf_end;
341
342 syms = elf_getdata(scn_dynsym, NULL);
343 if (syms == NULL)
344 goto out_elf_end;
345
346 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
347 if (scn_symstrs == NULL)
348 goto out_elf_end;
349
350 symstrs = elf_getdata(scn_symstrs, NULL);
351 if (symstrs == NULL)
352 goto out_elf_end;
353
354 if (symstrs->d_size == 0)
355 goto out_elf_end;
356
357 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
358 plt_offset = shdr_plt.sh_offset;
359 switch (ehdr.e_machine) {
360 case EM_ARM:
361 plt_header_size = 20;
362 plt_entry_size = 12;
363 break;
364
365 case EM_AARCH64:
366 plt_header_size = 32;
367 plt_entry_size = 16;
368 break;
369
370 case EM_SPARC:
371 plt_header_size = 48;
372 plt_entry_size = 12;
373 break;
374
375 case EM_SPARCV9:
376 plt_header_size = 128;
377 plt_entry_size = 32;
378 break;
379
380 default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
381 plt_header_size = shdr_plt.sh_entsize;
382 plt_entry_size = shdr_plt.sh_entsize;
383 break;
384 }
385 plt_offset += plt_header_size;
386
387 if (shdr_rel_plt.sh_type == SHT_RELA) {
388 GElf_Rela pos_mem, *pos;
389
390 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
391 nr_rel_entries) {
392 const char *elf_name = NULL;
393 char *demangled = NULL;
394 symidx = GELF_R_SYM(pos->r_info);
395 gelf_getsym(syms, symidx, &sym);
396
397 elf_name = elf_sym__name(&sym, symstrs);
398 demangled = demangle_sym(dso, 0, elf_name);
399 if (demangled != NULL)
400 elf_name = demangled;
401 snprintf(sympltname, sizeof(sympltname),
402 "%s@plt", elf_name);
403 free(demangled);
404
405 f = symbol__new(plt_offset, plt_entry_size,
406 STB_GLOBAL, STT_FUNC, sympltname);
407 if (!f)
408 goto out_elf_end;
409
410 plt_offset += plt_entry_size;
411 symbols__insert(&dso->symbols, f);
412 ++nr;
413 }
414 } else if (shdr_rel_plt.sh_type == SHT_REL) {
415 GElf_Rel pos_mem, *pos;
416 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
417 nr_rel_entries) {
418 const char *elf_name = NULL;
419 char *demangled = NULL;
420 symidx = GELF_R_SYM(pos->r_info);
421 gelf_getsym(syms, symidx, &sym);
422
423 elf_name = elf_sym__name(&sym, symstrs);
424 demangled = demangle_sym(dso, 0, elf_name);
425 if (demangled != NULL)
426 elf_name = demangled;
427 snprintf(sympltname, sizeof(sympltname),
428 "%s@plt", elf_name);
429 free(demangled);
430
431 f = symbol__new(plt_offset, plt_entry_size,
432 STB_GLOBAL, STT_FUNC, sympltname);
433 if (!f)
434 goto out_elf_end;
435
436 plt_offset += plt_entry_size;
437 symbols__insert(&dso->symbols, f);
438 ++nr;
439 }
440 }
441
442 err = 0;
443out_elf_end:
444 if (err == 0)
445 return nr;
446 pr_debug("%s: problems reading %s PLT info.\n",
447 __func__, dso->long_name);
448 return 0;
449}
450
451char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
452{
453 return demangle_sym(dso, kmodule, elf_name);
454}
455
456/*
457 * Align offset to 4 bytes as needed for note name and descriptor data.
458 */
459#define NOTE_ALIGN(n) (((n) + 3) & -4U)
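/* For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8. */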
460
461static int elf_read_build_id(Elf *elf, void *bf, size_t size)
462{
463 int err = -1;
464 GElf_Ehdr ehdr;
465 GElf_Shdr shdr;
466 Elf_Data *data;
467 Elf_Scn *sec;
468 Elf_Kind ek;
469 void *ptr;
470
471 if (size < BUILD_ID_SIZE)
472 goto out;
473
474 ek = elf_kind(elf);
475 if (ek != ELF_K_ELF)
476 goto out;
477
478 if (gelf_getehdr(elf, &ehdr) == NULL) {
479 pr_err("%s: cannot get elf header.\n", __func__);
480 goto out;
481 }
482
483 /*
484 * Check the following sections for notes:
485 * '.note.gnu.build-id'
486 * '.notes'
487 * '.note' (VDSO specific)
488 */
489 do {
490 sec = elf_section_by_name(elf, &ehdr, &shdr,
491 ".note.gnu.build-id", NULL);
492 if (sec)
493 break;
494
495 sec = elf_section_by_name(elf, &ehdr, &shdr,
496 ".notes", NULL);
497 if (sec)
498 break;
499
500 sec = elf_section_by_name(elf, &ehdr, &shdr,
501 ".note", NULL);
502 if (sec)
503 break;
504
505 return err;
506
507 } while (0);
508
509 data = elf_getdata(sec, NULL);
510 if (data == NULL)
511 goto out;
512
513 ptr = data->d_buf;
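/*
 * Walk the note records: each one is a GElf_Nhdr followed by the name and
 * descriptor data, both padded to 4-byte boundaries.
 */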
514 while (ptr < (data->d_buf + data->d_size)) {
515 GElf_Nhdr *nhdr = ptr;
516 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
517 descsz = NOTE_ALIGN(nhdr->n_descsz);
518 const char *name;
519
520 ptr += sizeof(*nhdr);
521 name = ptr;
522 ptr += namesz;
523 if (nhdr->n_type == NT_GNU_BUILD_ID &&
524 nhdr->n_namesz == sizeof("GNU")) {
525 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
526 size_t sz = min(size, descsz);
527 memcpy(bf, ptr, sz);
528 memset(bf + sz, 0, size - sz);
529 err = descsz;
530 break;
531 }
532 }
533 ptr += descsz;
534 }
535
536out:
537 return err;
538}
539
540#ifdef HAVE_LIBBFD_BUILDID_SUPPORT
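/*
 * Two read_build_id() implementations: when libbfd exposes the build id
 * (abfd->build_id) it is used directly; otherwise the #else branch below
 * scans the ELF note sections with elf_read_build_id().
 */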
541
542static int read_build_id(const char *filename, struct build_id *bid)
543{
544 size_t size = sizeof(bid->data);
545 int err = -1;
546 bfd *abfd;
547
548 abfd = bfd_openr(filename, NULL);
549 if (!abfd)
550 return -1;
551
552 if (!bfd_check_format(abfd, bfd_object)) {
553 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
554 goto out_close;
555 }
556
557 if (!abfd->build_id || abfd->build_id->size > size)
558 goto out_close;
559
560 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
561 memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
562 err = bid->size = abfd->build_id->size;
563
564out_close:
565 bfd_close(abfd);
566 return err;
567}
568
569#else // HAVE_LIBBFD_BUILDID_SUPPORT
570
571static int read_build_id(const char *filename, struct build_id *bid)
572{
573 size_t size = sizeof(bid->data);
574 int fd, err = -1;
575 Elf *elf;
576
577 if (size < BUILD_ID_SIZE)
578 goto out;
579
580 fd = open(filename, O_RDONLY);
581 if (fd < 0)
582 goto out;
583
584 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
585 if (elf == NULL) {
586 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
587 goto out_close;
588 }
589
590 err = elf_read_build_id(elf, bid->data, size);
591 if (err > 0)
592 bid->size = err;
593
594 elf_end(elf);
595out_close:
596 close(fd);
597out:
598 return err;
599}
600
601#endif // HAVE_LIBBFD_BUILDID_SUPPORT
602
603int filename__read_build_id(const char *filename, struct build_id *bid)
604{
605 struct kmod_path m = { .name = NULL, };
606 char path[PATH_MAX];
607 int err;
608
609 if (!filename)
610 return -EFAULT;
611
612 err = kmod_path__parse(&m, filename);
613 if (err)
614 return -1;
615
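/*
 * Compressed kernel modules (as detected by kmod_path__parse()) are first
 * decompressed into a temporary file; that copy is read and then unlinked
 * at the end of this function.
 */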
616 if (m.comp) {
617 int error = 0, fd;
618
619 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error);
620 if (fd < 0) {
621 pr_debug("Failed to decompress (error %d) %s\n",
622 error, filename);
623 return -1;
624 }
625 close(fd);
626 filename = path;
627 }
628
629 err = read_build_id(filename, bid);
630
631 if (m.comp)
632 unlink(filename);
633 return err;
634}
635
636int sysfs__read_build_id(const char *filename, struct build_id *bid)
637{
638 size_t size = sizeof(bid->data);
639 int fd, err = -1;
640
641 fd = open(filename, O_RDONLY);
642 if (fd < 0)
643 goto out;
644
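/*
 * Files such as /sys/kernel/notes are a raw stream of ELF note records, so
 * parse them with plain read()s: an Nhdr, then the 4-byte-aligned name,
 * then the descriptor.
 */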
645 while (1) {
646 char bf[BUFSIZ];
647 GElf_Nhdr nhdr;
648 size_t namesz, descsz;
649
650 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
651 break;
652
653 namesz = NOTE_ALIGN(nhdr.n_namesz);
654 descsz = NOTE_ALIGN(nhdr.n_descsz);
655 if (nhdr.n_type == NT_GNU_BUILD_ID &&
656 nhdr.n_namesz == sizeof("GNU")) {
657 if (read(fd, bf, namesz) != (ssize_t)namesz)
658 break;
659 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
660 size_t sz = min(descsz, size);
661 if (read(fd, bid->data, sz) == (ssize_t)sz) {
662 memset(bid->data + sz, 0, size - sz);
663 bid->size = sz;
664 err = 0;
665 break;
666 }
667 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
668 break;
669 } else {
670 int n = namesz + descsz;
671
672 if (n > (int)sizeof(bf)) {
673 n = sizeof(bf);
674 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
675 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
676 }
677 if (read(fd, bf, n) != n)
678 break;
679 }
680 }
681 close(fd);
682out:
683 return err;
684}
685
686#ifdef HAVE_LIBBFD_SUPPORT
687
688int filename__read_debuglink(const char *filename, char *debuglink,
689 size_t size)
690{
691 int err = -1;
692 asection *section;
693 bfd *abfd;
694
695 abfd = bfd_openr(filename, NULL);
696 if (!abfd)
697 return -1;
698
699 if (!bfd_check_format(abfd, bfd_object)) {
700 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
701 goto out_close;
702 }
703
704 section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
705 if (!section)
706 goto out_close;
707
708 if (section->size > size)
709 goto out_close;
710
711 if (!bfd_get_section_contents(abfd, section, debuglink, 0,
712 section->size))
713 goto out_close;
714
715 err = 0;
716
717out_close:
718 bfd_close(abfd);
719 return err;
720}
721
722#else
723
724int filename__read_debuglink(const char *filename, char *debuglink,
725 size_t size)
726{
727 int fd, err = -1;
728 Elf *elf;
729 GElf_Ehdr ehdr;
730 GElf_Shdr shdr;
731 Elf_Data *data;
732 Elf_Scn *sec;
733 Elf_Kind ek;
734
735 fd = open(filename, O_RDONLY);
736 if (fd < 0)
737 goto out;
738
739 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
740 if (elf == NULL) {
741 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
742 goto out_close;
743 }
744
745 ek = elf_kind(elf);
746 if (ek != ELF_K_ELF)
747 goto out_elf_end;
748
749 if (gelf_getehdr(elf, &ehdr) == NULL) {
750 pr_err("%s: cannot get elf header.\n", __func__);
751 goto out_elf_end;
752 }
753
754 sec = elf_section_by_name(elf, &ehdr, &shdr,
755 ".gnu_debuglink", NULL);
756 if (sec == NULL)
757 goto out_elf_end;
758
759 data = elf_getdata(sec, NULL);
760 if (data == NULL)
761 goto out_elf_end;
762
763 /* the start of this section is a zero-terminated string */
764 strncpy(debuglink, data->d_buf, size);
765
766 err = 0;
767
768out_elf_end:
769 elf_end(elf);
770out_close:
771 close(fd);
772out:
773 return err;
774}
775
776#endif
777
778static int dso__swap_init(struct dso *dso, unsigned char eidata)
779{
780 static unsigned int const endian = 1;
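/*
 * The first byte of 'endian' is 1 on a little-endian host and 0 on a
 * big-endian one; comparing it with the DSO's EI_DATA value tells us
 * whether byte swapping is needed.
 */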
781
782 dso->needs_swap = DSO_SWAP__NO;
783
784 switch (eidata) {
785 case ELFDATA2LSB:
786 /* We are big endian, DSO is little endian. */
787 if (*(unsigned char const *)&endian != 1)
788 dso->needs_swap = DSO_SWAP__YES;
789 break;
790
791 case ELFDATA2MSB:
792 /* We are little endian, DSO is big endian. */
793 if (*(unsigned char const *)&endian != 0)
794 dso->needs_swap = DSO_SWAP__YES;
795 break;
796
797 default:
798 pr_err("unrecognized DSO data encoding %d\n", eidata);
799 return -EINVAL;
800 }
801
802 return 0;
803}
804
805bool symsrc__possibly_runtime(struct symsrc *ss)
806{
807 return ss->dynsym || ss->opdsec;
808}
809
810bool symsrc__has_symtab(struct symsrc *ss)
811{
812 return ss->symtab != NULL;
813}
814
815void symsrc__destroy(struct symsrc *ss)
816{
817 zfree(&ss->name);
818 elf_end(ss->elf);
819 close(ss->fd);
820}
821
822bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
823{
824 /*
825 * Usually vmlinux is an ELF file of type ET_EXEC on most
826 * architectures; the Arm64 kernel, however, is linked with the
827 * '-shared' option, so type ET_DYN must be accepted as well.
828 */
829 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
830 ehdr.e_type == ET_DYN;
831}
832
833int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
834 enum dso_binary_type type)
835{
836 GElf_Ehdr ehdr;
837 Elf *elf;
838 int fd;
839
840 if (dso__needs_decompress(dso)) {
841 fd = dso__decompress_kmodule_fd(dso, name);
842 if (fd < 0)
843 return -1;
844
845 type = dso->symtab_type;
846 } else {
847 fd = open(name, O_RDONLY);
848 if (fd < 0) {
849 dso->load_errno = errno;
850 return -1;
851 }
852 }
853
854 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
855 if (elf == NULL) {
856 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
857 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
858 goto out_close;
859 }
860
861 if (gelf_getehdr(elf, &ehdr) == NULL) {
862 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
863 pr_debug("%s: cannot get elf header.\n", __func__);
864 goto out_elf_end;
865 }
866
867 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
868 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
869 goto out_elf_end;
870 }
871
872 /* Always reject images with a mismatched build-id: */
873 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
874 u8 build_id[BUILD_ID_SIZE];
875 struct build_id bid;
876 int size;
877
878 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
879 if (size <= 0) {
880 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
881 goto out_elf_end;
882 }
883
884 build_id__init(&bid, build_id, size);
885 if (!dso__build_id_equal(dso, &bid)) {
886 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
887 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
888 goto out_elf_end;
889 }
890 }
891
892 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
893
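/*
 * Cache the sections used later for symbol loading: .symtab, .dynsym and
 * .opd (the ppc64 function-descriptor section); each is kept only if its
 * section type matches the expected one.
 */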
894 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
895 NULL);
896 if (ss->symshdr.sh_type != SHT_SYMTAB)
897 ss->symtab = NULL;
898
899 ss->dynsym_idx = 0;
900 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
901 &ss->dynsym_idx);
902 if (ss->dynshdr.sh_type != SHT_DYNSYM)
903 ss->dynsym = NULL;
904
905 ss->opdidx = 0;
906 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
907 &ss->opdidx);
908 if (ss->opdshdr.sh_type != SHT_PROGBITS)
909 ss->opdsec = NULL;
910
911 if (dso->kernel == DSO_SPACE__USER)
912 ss->adjust_symbols = true;
913 else
914 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
915
916 ss->name = strdup(name);
917 if (!ss->name) {
918 dso->load_errno = errno;
919 goto out_elf_end;
920 }
921
922 ss->elf = elf;
923 ss->fd = fd;
924 ss->ehdr = ehdr;
925 ss->type = type;
926
927 return 0;
928
929out_elf_end:
930 elf_end(elf);
931out_close:
932 close(fd);
933 return -1;
934}
935
936/**
937 * ref_reloc_sym_not_found - check if the kernel relocation symbol has not been found yet.
938 * @kmap: kernel maps and relocation reference symbol
939 *
940 * This function returns %true if we are dealing with the kernel maps and the
941 * relocation reference symbol has not yet been found. Otherwise %false is
942 * returned.
943 */
944static bool ref_reloc_sym_not_found(struct kmap *kmap)
945{
946 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
947 !kmap->ref_reloc_sym->unrelocated_addr;
948}
949
950/**
951 * ref_reloc - kernel relocation offset.
952 * @kmap: kernel maps and relocation reference symbol
953 *
954 * This function returns the offset of kernel addresses as determined by using
955 * the relocation reference symbol, i.e. if the kernel has not been relocated
956 * then the return value is zero.
957 */
958static u64 ref_reloc(struct kmap *kmap)
959{
960 if (kmap && kmap->ref_reloc_sym &&
961 kmap->ref_reloc_sym->unrelocated_addr)
962 return kmap->ref_reloc_sym->addr -
963 kmap->ref_reloc_sym->unrelocated_addr;
964 return 0;
965}
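/*
 * Example (hypothetical addresses): if the reference symbol, typically
 * _stext, is at 0xffffffff9e000000 in kallsyms but its unrelocated address
 * in the ELF image is 0xffffffff81000000, ref_reloc() returns 0x1d000000.
 */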
966
967void __weak arch__sym_update(struct symbol *s __maybe_unused,
968 GElf_Sym *sym __maybe_unused) { }
969
970static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
971 GElf_Sym *sym, GElf_Shdr *shdr,
972 struct maps *kmaps, struct kmap *kmap,
973 struct dso **curr_dsop, struct map **curr_mapp,
974 const char *section_name,
975 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
976{
977 struct dso *curr_dso = *curr_dsop;
978 struct map *curr_map;
979 char dso_name[PATH_MAX];
980
981 /* Adjust symbol to map to file offset */
982 if (adjust_kernel_syms)
983 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
984
985 if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
986 return 0;
987
988 if (strcmp(section_name, ".text") == 0) {
989 /*
990 * The initial kernel mapping is based on
991 * kallsyms and identity maps. Overwrite it to
992 * map to the kernel dso.
993 */
994 if (*remap_kernel && dso->kernel && !kmodule) {
995 *remap_kernel = false;
996 map->start = shdr->sh_addr + ref_reloc(kmap);
997 map->end = map->start + shdr->sh_size;
998 map->pgoff = shdr->sh_offset;
999 map->map_ip = map__map_ip;
1000 map->unmap_ip = map__unmap_ip;
1001 /* Ensure maps are correctly ordered */
1002 if (kmaps) {
1003 map__get(map);
1004 maps__remove(kmaps, map);
1005 maps__insert(kmaps, map);
1006 map__put(map);
1007 }
1008 }
1009
1010 /*
1011 * The initial module mapping is based on
1012 * /proc/modules mapped to offset zero.
1013 * Overwrite it to map to the module dso.
1014 */
1015 if (*remap_kernel && kmodule) {
1016 *remap_kernel = false;
1017 map->pgoff = shdr->sh_offset;
1018 }
1019
1020 *curr_mapp = map;
1021 *curr_dsop = dso;
1022 return 0;
1023 }
1024
1025 if (!kmap)
1026 return 0;
1027
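/*
 * Symbols outside .text go into per-section maps whose DSO is named
 * "<short_name><section>", e.g. "[kernel.kallsyms].init.text" (example
 * name), created on demand below if not already present.
 */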
1028 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
1029
1030 curr_map = maps__find_by_name(kmaps, dso_name);
1031 if (curr_map == NULL) {
1032 u64 start = sym->st_value;
1033
1034 if (kmodule)
1035 start += map->start + shdr->sh_offset;
1036
1037 curr_dso = dso__new(dso_name);
1038 if (curr_dso == NULL)
1039 return -1;
1040 curr_dso->kernel = dso->kernel;
1041 curr_dso->long_name = dso->long_name;
1042 curr_dso->long_name_len = dso->long_name_len;
1043 curr_map = map__new2(start, curr_dso);
1044 dso__put(curr_dso);
1045 if (curr_map == NULL)
1046 return -1;
1047
1048 if (curr_dso->kernel)
1049 map__kmap(curr_map)->kmaps = kmaps;
1050
1051 if (adjust_kernel_syms) {
1052 curr_map->start = shdr->sh_addr + ref_reloc(kmap);
1053 curr_map->end = curr_map->start + shdr->sh_size;
1054 curr_map->pgoff = shdr->sh_offset;
1055 } else {
1056 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
1057 }
1058 curr_dso->symtab_type = dso->symtab_type;
1059 maps__insert(kmaps, curr_map);
1060 /*
1061 * Add it before we drop the reference to curr_map, i.e. while
1062 * we still are sure to have a reference to this DSO via
1063 * *curr_map->dso.
1064 */
1065 dsos__add(&kmaps->machine->dsos, curr_dso);
1066 /* kmaps already got it */
1067 map__put(curr_map);
1068 dso__set_loaded(curr_dso);
1069 *curr_mapp = curr_map;
1070 *curr_dsop = curr_dso;
1071 } else
1072 *curr_dsop = curr_map->dso;
1073
1074 return 0;
1075}
1076
1077static int
1078dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1079 struct symsrc *runtime_ss, int kmodule, int dynsym)
1080{
1081 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
1082 struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
1083 struct map *curr_map = map;
1084 struct dso *curr_dso = dso;
1085 Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
1086 uint32_t nr_syms;
1087 int err = -1;
1088 uint32_t idx;
1089 GElf_Ehdr ehdr;
1090 GElf_Shdr shdr;
1091 GElf_Shdr tshdr;
1092 Elf_Data *syms, *opddata = NULL;
1093 GElf_Sym sym;
1094 Elf_Scn *sec, *sec_strndx;
1095 Elf *elf;
1096 int nr = 0;
1097 bool remap_kernel = false, adjust_kernel_syms = false;
1098
1099 if (kmap && !kmaps)
1100 return -1;
1101
1102 elf = syms_ss->elf;
1103 ehdr = syms_ss->ehdr;
1104 if (dynsym) {
1105 sec = syms_ss->dynsym;
1106 shdr = syms_ss->dynshdr;
1107 } else {
1108 sec = syms_ss->symtab;
1109 shdr = syms_ss->symshdr;
1110 }
1111
1112 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
1113 ".text", NULL))
1114 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
1115
1116 if (runtime_ss->opdsec)
1117 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
1118
1119 syms = elf_getdata(sec, NULL);
1120 if (syms == NULL)
1121 goto out_elf_end;
1122
1123 sec = elf_getscn(elf, shdr.sh_link);
1124 if (sec == NULL)
1125 goto out_elf_end;
1126
1127 symstrs = elf_getdata(sec, NULL);
1128 if (symstrs == NULL)
1129 goto out_elf_end;
1130
1131 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
1132 if (sec_strndx == NULL)
1133 goto out_elf_end;
1134
1135 secstrs_run = elf_getdata(sec_strndx, NULL);
1136 if (secstrs_run == NULL)
1137 goto out_elf_end;
1138
1139 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
1140 if (sec_strndx == NULL)
1141 goto out_elf_end;
1142
1143 secstrs_sym = elf_getdata(sec_strndx, NULL);
1144 if (secstrs_sym == NULL)
1145 goto out_elf_end;
1146
1147 nr_syms = shdr.sh_size / shdr.sh_entsize;
1148
1149 memset(&sym, 0, sizeof(sym));
1150
1151 /*
1152 * The kernel relocation symbol is needed in advance in order to adjust
1153 * kernel maps correctly.
1154 */
1155 if (ref_reloc_sym_not_found(kmap)) {
1156 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1157 const char *elf_name = elf_sym__name(&sym, symstrs);
1158
1159 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
1160 continue;
1161 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
1162 map->reloc = kmap->ref_reloc_sym->addr -
1163 kmap->ref_reloc_sym->unrelocated_addr;
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * Handle any relocation of vdso necessary because older kernels
1170 * attempted to prelink vdso to its virtual address.
1171 */
1172 if (dso__is_vdso(dso))
1173 map->reloc = map->start - dso->text_offset;
1174
1175 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
1176 /*
1177 * Initial kernel and module mappings do not map to the dso.
1178 * Flag the fixups.
1179 */
1180 if (dso->kernel) {
1181 remap_kernel = true;
1182 adjust_kernel_syms = dso->adjust_symbols;
1183 }
1184 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1185 struct symbol *f;
1186 const char *elf_name = elf_sym__name(&sym, symstrs);
1187 char *demangled = NULL;
1188 int is_label = elf_sym__is_label(&sym);
1189 const char *section_name;
1190 bool used_opd = false;
1191
1192 if (!is_label && !elf_sym__filter(&sym))
1193 continue;
1194
1195 /* Reject ARM ELF "mapping symbols": these aren't unique and
1196 * don't identify functions, so they would only confuse the
1197 * profile output. */
1198 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
1199 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
1200 && (elf_name[2] == '\0' || elf_name[2] == '.'))
1201 continue;
1202 }
1203
1204 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
1205 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
1206 u64 *opd = opddata->d_buf + offset;
1207 sym.st_value = DSO__SWAP(dso, u64, *opd);
1208 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
1209 sym.st_value);
1210 used_opd = true;
1211 }
1212 /*
1213 * When loading symbols in a data mapping, ABS symbols (which
1214 * have SHN_ABS in their st_shndx) make elf_getscn() fail,
1215 * and that marks the whole load as a failure, so already
1216 * loaded symbols cannot be fixed up.
1217 *
1218 * I'm not sure what should be done. Just ignore them for now.
1219 * - Namhyung Kim
1220 */
1221 if (sym.st_shndx == SHN_ABS)
1222 continue;
1223
1224 sec = elf_getscn(syms_ss->elf, sym.st_shndx);
1225 if (!sec)
1226 goto out_elf_end;
1227
1228 gelf_getshdr(sec, &shdr);
1229
1230 secstrs = secstrs_sym;
1231
1232 /*
1233 * We have to fall back to runtime when the syms' section header has
1234 * NOBITS set. NOBITS results in file offset (sh_offset) not
1235 * being incremented. So sh_offset used below has different
1236 * values for syms (invalid) and runtime (valid).
1237 */
1238 if (shdr.sh_type == SHT_NOBITS) {
1239 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
1240 if (!sec)
1241 goto out_elf_end;
1242
1243 gelf_getshdr(sec, &shdr);
1244 secstrs = secstrs_run;
1245 }
1246
1247 if (is_label && !elf_sec__filter(&shdr, secstrs))
1248 continue;
1249
1250 section_name = elf_sec__name(&shdr, secstrs);
1251
1252 /* On ARM, symbols for thumb functions have 1 added to
1253 * the symbol address as a flag - remove it */
1254 if ((ehdr.e_machine == EM_ARM) &&
1255 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
1256 (sym.st_value & 1))
1257 --sym.st_value;
1258
1259 if (dso->kernel) {
1260 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
1261 section_name, adjust_kernel_syms, kmodule, &remap_kernel))
1262 goto out_elf_end;
1263 } else if ((used_opd && runtime_ss->adjust_symbols) ||
1264 (!used_opd && syms_ss->adjust_symbols)) {
1265 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1266 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1267 (u64)sym.st_value, (u64)shdr.sh_addr,
1268 (u64)shdr.sh_offset);
1269 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1270 }
1271
1272 demangled = demangle_sym(dso, kmodule, elf_name);
1273 if (demangled != NULL)
1274 elf_name = demangled;
1275
1276 f = symbol__new(sym.st_value, sym.st_size,
1277 GELF_ST_BIND(sym.st_info),
1278 GELF_ST_TYPE(sym.st_info), elf_name);
1279 free(demangled);
1280 if (!f)
1281 goto out_elf_end;
1282
1283 arch__sym_update(f, &sym);
1284
1285 __symbols__insert(&curr_dso->symbols, f, dso->kernel);
1286 nr++;
1287 }
1288
1289 /*
1290 * For misannotated, zeroed, ASM function sizes.
1291 */
1292 if (nr > 0) {
1293 symbols__fixup_end(&dso->symbols);
1294 symbols__fixup_duplicate(&dso->symbols);
1295 if (kmap) {
1296 /*
1297 * We need to fixup this here too because we create new
1298 * maps here, for things like vsyscall sections.
1299 */
1300 maps__fixup_end(kmaps);
1301 }
1302 }
1303 err = nr;
1304out_elf_end:
1305 return err;
1306}
1307
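/*
 * Load symbols for 'dso' from syms_ss: the .symtab symbols first (if
 * present) and then the .dynsym ones, adjusted against the runtime image
 * in runtime_ss. Returns the number of symbols added, or a negative value
 * on error.
 */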
1308int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1309 struct symsrc *runtime_ss, int kmodule)
1310{
1311 int nr = 0;
1312 int err = -1;
1313
1314 dso->symtab_type = syms_ss->type;
1315 dso->is_64_bit = syms_ss->is_64_bit;
1316 dso->rel = syms_ss->ehdr.e_type == ET_REL;
1317
1318 /*
1319 * Modules may already have symbols from kallsyms, but those symbols
1320 * have the wrong values for the dso maps, so remove them.
1321 */
1322 if (kmodule && syms_ss->symtab)
1323 symbols__delete(&dso->symbols);
1324
1325 if (!syms_ss->symtab) {
1326 /*
1327 * If the vmlinux is stripped, fail so we will fall back
1328 * to using kallsyms. The vmlinux runtime symbols aren't
1329 * of much use.
1330 */
1331 if (dso->kernel)
1332 return err;
1333 } else {
1334 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1335 kmodule, 0);
1336 if (err < 0)
1337 return err;
1338 nr = err;
1339 }
1340
1341 if (syms_ss->dynsym) {
1342 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1343 kmodule, 1);
1344 if (err < 0)
1345 return err;
1346 err += nr;
1347 }
1348
1349 return err;
1350}
1351
1352static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1353{
1354 GElf_Phdr phdr;
1355 size_t i, phdrnum;
1356 int err;
1357 u64 sz;
1358
1359 if (elf_getphdrnum(elf, &phdrnum))
1360 return -1;
1361
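/*
 * Report every PT_LOAD segment to the callback: executable segments only
 * when 'exe' is set, otherwise any readable segment.
 */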
1362 for (i = 0; i < phdrnum; i++) {
1363 if (gelf_getphdr(elf, i, &phdr) == NULL)
1364 return -1;
1365 if (phdr.p_type != PT_LOAD)
1366 continue;
1367 if (exe) {
1368 if (!(phdr.p_flags & PF_X))
1369 continue;
1370 } else {
1371 if (!(phdr.p_flags & PF_R))
1372 continue;
1373 }
1374 sz = min(phdr.p_memsz, phdr.p_filesz);
1375 if (!sz)
1376 continue;
1377 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1378 if (err)
1379 return err;
1380 }
1381 return 0;
1382}
1383
1384int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1385 bool *is_64_bit)
1386{
1387 int err;
1388 Elf *elf;
1389
1390 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1391 if (elf == NULL)
1392 return -1;
1393
1394 if (is_64_bit)
1395 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1396
1397 err = elf_read_maps(elf, exe, mapfn, data);
1398
1399 elf_end(elf);
1400 return err;
1401}
1402
1403enum dso_type dso__type_fd(int fd)
1404{
1405 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1406 GElf_Ehdr ehdr;
1407 Elf_Kind ek;
1408 Elf *elf;
1409
1410 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1411 if (elf == NULL)
1412 goto out;
1413
1414 ek = elf_kind(elf);
1415 if (ek != ELF_K_ELF)
1416 goto out_end;
1417
1418 if (gelf_getclass(elf) == ELFCLASS64) {
1419 dso_type = DSO__TYPE_64BIT;
1420 goto out_end;
1421 }
1422
1423 if (gelf_getehdr(elf, &ehdr) == NULL)
1424 goto out_end;
1425
1426 if (ehdr.e_machine == EM_X86_64)
1427 dso_type = DSO__TYPE_X32BIT;
1428 else
1429 dso_type = DSO__TYPE_32BIT;
1430out_end:
1431 elf_end(elf);
1432out:
1433 return dso_type;
1434}
1435
1436static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1437{
1438 ssize_t r;
1439 size_t n;
1440 int err = -1;
1441 char *buf = malloc(page_size);
1442
1443 if (buf == NULL)
1444 return -1;
1445
1446 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1447 goto out;
1448
1449 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1450 goto out;
1451
1452 while (len) {
1453 n = page_size;
1454 if (len < n)
1455 n = len;
1456 /* Use read because mmap won't work on proc files */
1457 r = read(from, buf, n);
1458 if (r < 0)
1459 goto out;
1460 if (!r)
1461 break;
1462 n = r;
1463 r = write(to, buf, n);
1464 if (r < 0)
1465 goto out;
1466 if ((size_t)r != n)
1467 goto out;
1468 len -= n;
1469 }
1470
1471 err = 0;
1472out:
1473 free(buf);
1474 return err;
1475}
1476
1477struct kcore {
1478 int fd;
1479 int elfclass;
1480 Elf *elf;
1481 GElf_Ehdr ehdr;
1482};
1483
1484static int kcore__open(struct kcore *kcore, const char *filename)
1485{
1486 GElf_Ehdr *ehdr;
1487
1488 kcore->fd = open(filename, O_RDONLY);
1489 if (kcore->fd == -1)
1490 return -1;
1491
1492 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1493 if (!kcore->elf)
1494 goto out_close;
1495
1496 kcore->elfclass = gelf_getclass(kcore->elf);
1497 if (kcore->elfclass == ELFCLASSNONE)
1498 goto out_end;
1499
1500 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1501 if (!ehdr)
1502 goto out_end;
1503
1504 return 0;
1505
1506out_end:
1507 elf_end(kcore->elf);
1508out_close:
1509 close(kcore->fd);
1510 return -1;
1511}
1512
1513static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1514 bool temp)
1515{
1516 kcore->elfclass = elfclass;
1517
1518 if (temp)
1519 kcore->fd = mkstemp(filename);
1520 else
1521 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1522 if (kcore->fd == -1)
1523 return -1;
1524
1525 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1526 if (!kcore->elf)
1527 goto out_close;
1528
1529 if (!gelf_newehdr(kcore->elf, elfclass))
1530 goto out_end;
1531
1532 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
1533
1534 return 0;
1535
1536out_end:
1537 elf_end(kcore->elf);
1538out_close:
1539 close(kcore->fd);
1540 unlink(filename);
1541 return -1;
1542}
1543
1544static void kcore__close(struct kcore *kcore)
1545{
1546 elf_end(kcore->elf);
1547 close(kcore->fd);
1548}
1549
1550static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1551{
1552 GElf_Ehdr *ehdr = &to->ehdr;
1553 GElf_Ehdr *kehdr = &from->ehdr;
1554
1555 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1556 ehdr->e_type = kehdr->e_type;
1557 ehdr->e_machine = kehdr->e_machine;
1558 ehdr->e_version = kehdr->e_version;
1559 ehdr->e_entry = 0;
1560 ehdr->e_shoff = 0;
1561 ehdr->e_flags = kehdr->e_flags;
1562 ehdr->e_phnum = count;
1563 ehdr->e_shentsize = 0;
1564 ehdr->e_shnum = 0;
1565 ehdr->e_shstrndx = 0;
1566
1567 if (from->elfclass == ELFCLASS32) {
1568 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1569 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1570 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1571 } else {
1572 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1573 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1574 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1575 }
1576
1577 if (!gelf_update_ehdr(to->elf, ehdr))
1578 return -1;
1579
1580 if (!gelf_newphdr(to->elf, count))
1581 return -1;
1582
1583 return 0;
1584}
1585
1586static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1587 u64 addr, u64 len)
1588{
1589 GElf_Phdr phdr = {
1590 .p_type = PT_LOAD,
1591 .p_flags = PF_R | PF_W | PF_X,
1592 .p_offset = offset,
1593 .p_vaddr = addr,
1594 .p_paddr = 0,
1595 .p_filesz = len,
1596 .p_memsz = len,
1597 .p_align = page_size,
1598 };
1599
1600 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
1601 return -1;
1602
1603 return 0;
1604}
1605
1606static off_t kcore__write(struct kcore *kcore)
1607{
1608 return elf_update(kcore->elf, ELF_C_WRITE);
1609}
1610
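/*
 * Book-keeping for one program header of the copied kcore: 'offset' is the
 * segment's file offset in the source kcore, 'rel' its offset relative to
 * the start of the copied data area, and 'remaps' points at the kernel-text
 * phdr when this segment is just a remap of part of it (see
 * kcore_copy__find_remaps()).
 */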
1611struct phdr_data {
1612 off_t offset;
1613 off_t rel;
1614 u64 addr;
1615 u64 len;
1616 struct list_head node;
1617 struct phdr_data *remaps;
1618};
1619
1620struct sym_data {
1621 u64 addr;
1622 struct list_head node;
1623};
1624
1625struct kcore_copy_info {
1626 u64 stext;
1627 u64 etext;
1628 u64 first_symbol;
1629 u64 last_symbol;
1630 u64 first_module;
1631 u64 first_module_symbol;
1632 u64 last_module_symbol;
1633 size_t phnum;
1634 struct list_head phdrs;
1635 struct list_head syms;
1636};
1637
1638#define kcore_copy__for_each_phdr(k, p) \
1639 list_for_each_entry((p), &(k)->phdrs, node)
1640
1641static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
1642{
1643 struct phdr_data *p = zalloc(sizeof(*p));
1644
1645 if (p) {
1646 p->addr = addr;
1647 p->len = len;
1648 p->offset = offset;
1649 }
1650
1651 return p;
1652}
1653
1654static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
1655 u64 addr, u64 len,
1656 off_t offset)
1657{
1658 struct phdr_data *p = phdr_data__new(addr, len, offset);
1659
1660 if (p)
1661 list_add_tail(&p->node, &kci->phdrs);
1662
1663 return p;
1664}
1665
1666static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
1667{
1668 struct phdr_data *p, *tmp;
1669
1670 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
1671 list_del_init(&p->node);
1672 free(p);
1673 }
1674}
1675
1676static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
1677 u64 addr)
1678{
1679 struct sym_data *s = zalloc(sizeof(*s));
1680
1681 if (s) {
1682 s->addr = addr;
1683 list_add_tail(&s->node, &kci->syms);
1684 }
1685
1686 return s;
1687}
1688
1689static void kcore_copy__free_syms(struct kcore_copy_info *kci)
1690{
1691 struct sym_data *s, *tmp;
1692
1693 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
1694 list_del_init(&s->node);
1695 free(s);
1696 }
1697}
1698
1699static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1700 u64 start)
1701{
1702 struct kcore_copy_info *kci = arg;
1703
1704 if (!kallsyms__is_function(type))
1705 return 0;
1706
1707 if (strchr(name, '[')) {
1708 if (!kci->first_module_symbol || start < kci->first_module_symbol)
1709 kci->first_module_symbol = start;
1710 if (start > kci->last_module_symbol)
1711 kci->last_module_symbol = start;
1712 return 0;
1713 }
1714
1715 if (!kci->first_symbol || start < kci->first_symbol)
1716 kci->first_symbol = start;
1717
1718 if (!kci->last_symbol || start > kci->last_symbol)
1719 kci->last_symbol = start;
1720
1721 if (!strcmp(name, "_stext")) {
1722 kci->stext = start;
1723 return 0;
1724 }
1725
1726 if (!strcmp(name, "_etext")) {
1727 kci->etext = start;
1728 return 0;
1729 }
1730
1731 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
1732 return -1;
1733
1734 return 0;
1735}
1736
1737static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1738 const char *dir)
1739{
1740 char kallsyms_filename[PATH_MAX];
1741
1742 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1743
1744 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1745 return -1;
1746
1747 if (kallsyms__parse(kallsyms_filename, kci,
1748 kcore_copy__process_kallsyms) < 0)
1749 return -1;
1750
1751 return 0;
1752}
1753
1754static int kcore_copy__process_modules(void *arg,
1755 const char *name __maybe_unused,
1756 u64 start, u64 size __maybe_unused)
1757{
1758 struct kcore_copy_info *kci = arg;
1759
1760 if (!kci->first_module || start < kci->first_module)
1761 kci->first_module = start;
1762
1763 return 0;
1764}
1765
1766static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1767 const char *dir)
1768{
1769 char modules_filename[PATH_MAX];
1770
1771 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1772
1773 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1774 return -1;
1775
1776 if (modules__parse(modules_filename, kci,
1777 kcore_copy__process_modules) < 0)
1778 return -1;
1779
1780 return 0;
1781}
1782
1783static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
1784 u64 pgoff, u64 s, u64 e)
1785{
1786 u64 len, offset;
1787
1788 if (s < start || s >= end)
1789 return 0;
1790
1791 offset = (s - start) + pgoff;
1792 len = e < end ? e - s : end - s;
1793
1794 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
1795}
1796
1797static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1798{
1799 struct kcore_copy_info *kci = data;
1800 u64 end = start + len;
1801 struct sym_data *sdat;
1802
1803 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
1804 return -1;
1805
1806 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
1807 kci->last_module_symbol))
1808 return -1;
1809
1810 list_for_each_entry(sdat, &kci->syms, node) {
1811 u64 s = round_down(sdat->addr, page_size);
1812
1813 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
1814 return -1;
1815 }
1816
1817 return 0;
1818}
1819
1820static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1821{
1822 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1823 return -1;
1824
1825 return 0;
1826}
1827
1828static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
1829{
1830 struct phdr_data *p, *k = NULL;
1831 u64 kend;
1832
1833 if (!kci->stext)
1834 return;
1835
1836 /* Find phdr that corresponds to the kernel map (contains stext) */
1837 kcore_copy__for_each_phdr(kci, p) {
1838 u64 pend = p->addr + p->len - 1;
1839
1840 if (p->addr <= kci->stext && pend >= kci->stext) {
1841 k = p;
1842 break;
1843 }
1844 }
1845
1846 if (!k)
1847 return;
1848
1849 kend = k->offset + k->len;
1850
1851 /* Find phdrs that remap the kernel */
1852 kcore_copy__for_each_phdr(kci, p) {
1853 u64 pend = p->offset + p->len;
1854
1855 if (p == k)
1856 continue;
1857
1858 if (p->offset >= k->offset && pend <= kend)
1859 p->remaps = k;
1860 }
1861}
1862
1863static void kcore_copy__layout(struct kcore_copy_info *kci)
1864{
1865 struct phdr_data *p;
1866 off_t rel = 0;
1867
1868 kcore_copy__find_remaps(kci);
1869
1870 kcore_copy__for_each_phdr(kci, p) {
1871 if (!p->remaps) {
1872 p->rel = rel;
1873 rel += p->len;
1874 }
1875 kci->phnum += 1;
1876 }
1877
1878 kcore_copy__for_each_phdr(kci, p) {
1879 struct phdr_data *k = p->remaps;
1880
1881 if (k)
1882 p->rel = p->offset - k->offset + k->rel;
1883 }
1884}
1885
1886static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1887 Elf *elf)
1888{
1889 if (kcore_copy__parse_kallsyms(kci, dir))
1890 return -1;
1891
1892 if (kcore_copy__parse_modules(kci, dir))
1893 return -1;
1894
1895 if (kci->stext)
1896 kci->stext = round_down(kci->stext, page_size);
1897 else
1898 kci->stext = round_down(kci->first_symbol, page_size);
1899
1900 if (kci->etext) {
1901 kci->etext = round_up(kci->etext, page_size);
1902 } else if (kci->last_symbol) {
1903 kci->etext = round_up(kci->last_symbol, page_size);
1904 kci->etext += page_size;
1905 }
1906
1907 if (kci->first_module_symbol &&
1908 (!kci->first_module || kci->first_module_symbol < kci->first_module))
1909 kci->first_module = kci->first_module_symbol;
1910
1911 kci->first_module = round_down(kci->first_module, page_size);
1912
1913 if (kci->last_module_symbol) {
1914 kci->last_module_symbol = round_up(kci->last_module_symbol,
1915 page_size);
1916 kci->last_module_symbol += page_size;
1917 }
1918
1919 if (!kci->stext || !kci->etext)
1920 return -1;
1921
1922 if (kci->first_module && !kci->last_module_symbol)
1923 return -1;
1924
1925 if (kcore_copy__read_maps(kci, elf))
1926 return -1;
1927
1928 kcore_copy__layout(kci);
1929
1930 return 0;
1931}
1932
1933static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1934 const char *name)
1935{
1936 char from_filename[PATH_MAX];
1937 char to_filename[PATH_MAX];
1938
1939 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1940 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1941
1942 return copyfile_mode(from_filename, to_filename, 0400);
1943}
1944
1945static int kcore_copy__unlink(const char *dir, const char *name)
1946{
1947 char filename[PATH_MAX];
1948
1949 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1950
1951 return unlink(filename);
1952}
1953
1954static int kcore_copy__compare_fds(int from, int to)
1955{
1956 char *buf_from;
1957 char *buf_to;
1958 ssize_t ret;
1959 size_t len;
1960 int err = -1;
1961
1962 buf_from = malloc(page_size);
1963 buf_to = malloc(page_size);
1964 if (!buf_from || !buf_to)
1965 goto out;
1966
1967 while (1) {
1968 /* Use read because mmap won't work on proc files */
1969 ret = read(from, buf_from, page_size);
1970 if (ret < 0)
1971 goto out;
1972
1973 if (!ret)
1974 break;
1975
1976 len = ret;
1977
1978 if (readn(to, buf_to, len) != (int)len)
1979 goto out;
1980
1981 if (memcmp(buf_from, buf_to, len))
1982 goto out;
1983 }
1984
1985 err = 0;
1986out:
1987 free(buf_to);
1988 free(buf_from);
1989 return err;
1990}
1991
1992static int kcore_copy__compare_files(const char *from_filename,
1993 const char *to_filename)
1994{
1995 int from, to, err = -1;
1996
1997 from = open(from_filename, O_RDONLY);
1998 if (from < 0)
1999 return -1;
2000
2001 to = open(to_filename, O_RDONLY);
2002 if (to < 0)
2003 goto out_close_from;
2004
2005 err = kcore_copy__compare_fds(from, to);
2006
2007 close(to);
2008out_close_from:
2009 close(from);
2010 return err;
2011}
2012
2013static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
2014 const char *name)
2015{
2016 char from_filename[PATH_MAX];
2017 char to_filename[PATH_MAX];
2018
2019 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2020 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2021
2022 return kcore_copy__compare_files(from_filename, to_filename);
2023}
2024
2025/**
2026 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
2027 * @from_dir: from directory
2028 * @to_dir: to directory
2029 *
2030 * This function copies kallsyms, modules and kcore files from one directory to
2031 * another. kallsyms and modules are copied entirely. Only code segments are
2032 * copied from kcore. It is assumed that two segments suffice: one for the
2033 * kernel proper and one for all the modules. The code segments are determined
2034 * from kallsyms and modules files. The kernel map starts at _stext or the
2035 * lowest function symbol, and ends at _etext or the highest function symbol.
2036 * The module map starts at the lowest module address and ends at the highest
2037 * module symbol. Start addresses are rounded down to the nearest page. End
2038 * addresses are rounded up to the nearest page. An extra page is added to the
2039 * highest kernel symbol and highest module symbol to, hopefully, encompass that
2040 * symbol too. Because it contains only code sections, the resulting kcore is
2041 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
2042 * is not the same for the kernel map and the modules map. That happens because
2043 * the data is copied adjacently whereas the original kcore has gaps. Finally,
2044 * kallsyms and modules files are compared with their copies to check that
2045 * modules have not been loaded or unloaded while the copies were taking place.
2046 *
2047 * Return: %0 on success, %-1 on failure.
2048 */
2049int kcore_copy(const char *from_dir, const char *to_dir)
2050{
2051 struct kcore kcore;
2052 struct kcore extract;
2053 int idx = 0, err = -1;
2054 off_t offset, sz;
2055 struct kcore_copy_info kci = { .stext = 0, };
2056 char kcore_filename[PATH_MAX];
2057 char extract_filename[PATH_MAX];
2058 struct phdr_data *p;
2059
2060 INIT_LIST_HEAD(&kci.phdrs);
2061 INIT_LIST_HEAD(&kci.syms);
2062
2063 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
2064 return -1;
2065
2066 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
2067 goto out_unlink_kallsyms;
2068
2069 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
2070 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
2071
2072 if (kcore__open(&kcore, kcore_filename))
2073 goto out_unlink_modules;
2074
2075 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
2076 goto out_kcore_close;
2077
2078 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
2079 goto out_kcore_close;
2080
2081 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
2082 goto out_extract_close;
2083
2084 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
2085 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
2086 offset = round_up(offset, page_size);
2087
2088 kcore_copy__for_each_phdr(&kci, p) {
2089 off_t offs = p->rel + offset;
2090
2091 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
2092 goto out_extract_close;
2093 }
2094
2095 sz = kcore__write(&extract);
2096 if (sz < 0 || sz > offset)
2097 goto out_extract_close;
2098
2099 kcore_copy__for_each_phdr(&kci, p) {
2100 off_t offs = p->rel + offset;
2101
2102 if (p->remaps)
2103 continue;
2104 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
2105 goto out_extract_close;
2106 }
2107
2108 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
2109 goto out_extract_close;
2110
2111 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
2112 goto out_extract_close;
2113
2114 err = 0;
2115
2116out_extract_close:
2117 kcore__close(&extract);
2118 if (err)
2119 unlink(extract_filename);
2120out_kcore_close:
2121 kcore__close(&kcore);
2122out_unlink_modules:
2123 if (err)
2124 kcore_copy__unlink(to_dir, "modules");
2125out_unlink_kallsyms:
2126 if (err)
2127 kcore_copy__unlink(to_dir, "kallsyms");
2128
2129 kcore_copy__free_phdrs(&kci);
2130 kcore_copy__free_syms(&kci);
2131
2132 return err;
2133}
2134
2135int kcore_extract__create(struct kcore_extract *kce)
2136{
2137 struct kcore kcore;
2138 struct kcore extract;
2139 size_t count = 1;
2140 int idx = 0, err = -1;
2141 off_t offset = page_size, sz;
2142
2143 if (kcore__open(&kcore, kce->kcore_filename))
2144 return -1;
2145
2146 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
2147 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
2148 goto out_kcore_close;
2149
2150 if (kcore__copy_hdr(&kcore, &extract, count))
2151 goto out_extract_close;
2152
2153 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
2154 goto out_extract_close;
2155
2156 sz = kcore__write(&extract);
2157 if (sz < 0 || sz > offset)
2158 goto out_extract_close;
2159
2160 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
2161 goto out_extract_close;
2162
2163 err = 0;
2164
2165out_extract_close:
2166 kcore__close(&extract);
2167 if (err)
2168 unlink(kce->extract_filename);
2169out_kcore_close:
2170 kcore__close(&kcore);
2171
2172 return err;
2173}
2174
2175void kcore_extract__delete(struct kcore_extract *kce)
2176{
2177 unlink(kce->extract_filename);
2178}
2179
2180#ifdef HAVE_GELF_GETNOTE_SUPPORT
2181
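/*
 * An SDT note description starts with three addresses: the probe location,
 * the link-time address of .stapsdt.base, and an optional semaphore
 * ("reference counter"). The two helpers below adjust the location and the
 * semaphore address for the effect of prelinking, using the file offsets of
 * the .stapsdt.base and .probes sections.
 */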
2182static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
2183{
2184 if (!base_off)
2185 return;
2186
2187 if (tmp->bit32)
2188 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
2189 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
2190 tmp->addr.a32[SDT_NOTE_IDX_BASE];
2191 else
2192 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
2193 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
2194 tmp->addr.a64[SDT_NOTE_IDX_BASE];
2195}
2196
2197static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2198 GElf_Addr base_off)
2199{
2200 if (!base_off)
2201 return;
2202
2203 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2204 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2205 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2206 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2207}
2208
2209/**
2210 * populate_sdt_note : Parse raw data and identify SDT note
2211 * @elf: elf of the opened file
2212 * @data: raw data of a section with description offset applied
2213 * @len: note description size
2214 * @type: type of the note
2215 * @sdt_notes: List to add the SDT note
2216 *
2217 * Responsible for parsing the @data in the .note.stapsdt section of @elf;
2218 * if it is an SDT note, it is appended to the @sdt_notes list.
2219 */
2220static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2221 struct list_head *sdt_notes)
2222{
2223 const char *provider, *name, *args;
2224 struct sdt_note *tmp = NULL;
2225 GElf_Ehdr ehdr;
2226 GElf_Shdr shdr;
2227 int ret = -EINVAL;
2228
2229 union {
2230 Elf64_Addr a64[NR_ADDR];
2231 Elf32_Addr a32[NR_ADDR];
2232 } buf;
2233
2234 Elf_Data dst = {
2235 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2236 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2237 .d_off = 0, .d_align = 0
2238 };
2239 Elf_Data src = {
2240 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2241 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2242 .d_align = 0
2243 };
2244
2245 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2246 if (!tmp) {
2247 ret = -ENOMEM;
2248 goto out_err;
2249 }
2250
2251 INIT_LIST_HEAD(&tmp->note_list);
2252
2253 if (len < dst.d_size + 3)
2254 goto out_free_note;
2255
2256 /* Translation from file representation to memory representation */
2257 if (gelf_xlatetom(*elf, &dst, &src,
2258 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2259 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2260 goto out_free_note;
2261 }
2262
2263 /* Populate the fields of sdt_note */
2264 provider = data + dst.d_size;
2265
2266 name = (const char *)memchr(provider, '\0', data + len - provider);
2267 if (name++ == NULL)
2268 goto out_free_note;
2269
2270 tmp->provider = strdup(provider);
2271 if (!tmp->provider) {
2272 ret = -ENOMEM;
2273 goto out_free_note;
2274 }
2275 tmp->name = strdup(name);
2276 if (!tmp->name) {
2277 ret = -ENOMEM;
2278 goto out_free_prov;
2279 }
2280
2281 args = memchr(name, '\0', data + len - name);
2282
2283 /*
2284 * There is no argument if:
2285 * - We reached the end of the note;
2286 * - There is not enough room to hold a potential string;
2287 * - The argument string is empty or just contains ':'.
2288 */
2289 if (args == NULL || data + len - args < 2 ||
2290 args[1] == ':' || args[1] == '\0')
2291 tmp->args = NULL;
2292 else {
2293 tmp->args = strdup(++args);
2294 if (!tmp->args) {
2295 ret = -ENOMEM;
2296 goto out_free_name;
2297 }
2298 }
2299
2300 if (gelf_getclass(*elf) == ELFCLASS32) {
2301 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2302 tmp->bit32 = true;
2303 } else {
2304 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2305 tmp->bit32 = false;
2306 }
2307
2308 if (!gelf_getehdr(*elf, &ehdr)) {
2309 pr_debug("%s : cannot get elf header.\n", __func__);
2310 ret = -EBADF;
2311 goto out_free_args;
2312 }
2313
2314 /* Adjust for the prelink effect:
2315 * Find the .stapsdt.base section; it helps us handle
2316 * prelinking (if present). Compare the file offset of that
2317 * base section with the base address recorded in the SDT note
2318 * description and, if they differ, adjust the note location
2319 * accordingly.
2320 */
2321 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2322 sdt_adjust_loc(tmp, shdr.sh_offset);
2323
2324 /* Adjust reference counter offset */
2325 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2326 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
2327
2328 list_add_tail(&tmp->note_list, sdt_notes);
2329 return 0;
2330
2331out_free_args:
2332 zfree(&tmp->args);
2333out_free_name:
2334 zfree(&tmp->name);
2335out_free_prov:
2336 zfree(&tmp->provider);
2337out_free_note:
2338 free(tmp);
2339out_err:
2340 return ret;
2341}
2342
2343/**
2344 * construct_sdt_notes_list : constructs a list of SDT notes
2345 * @elf : elf to look into
2346 * @sdt_notes : empty list_head
2347 *
2348 * Scans the sections in 'elf' for the .note.stapsdt
2349 * section, then calls populate_sdt_note() to find the
2350 * SDT events and populate 'sdt_notes'.
2351 */
2352static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2353{
2354 GElf_Ehdr ehdr;
2355 Elf_Scn *scn = NULL;
2356 Elf_Data *data;
2357 GElf_Shdr shdr;
2358 size_t shstrndx, next;
2359 GElf_Nhdr nhdr;
2360 size_t name_off, desc_off, offset;
2361 int ret = 0;
2362
2363 if (gelf_getehdr(elf, &ehdr) == NULL) {
2364 ret = -EBADF;
2365 goto out_ret;
2366 }
2367 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2368 ret = -EBADF;
2369 goto out_ret;
2370 }
2371
2372 /* Look for the required section */
2373 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
2374 if (!scn) {
2375 ret = -ENOENT;
2376 goto out_ret;
2377 }
2378
2379 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2380 ret = -ENOENT;
2381 goto out_ret;
2382 }
2383
2384 data = elf_getdata(scn, NULL);
2385
2386 /* Get the SDT notes */
2387 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2388 &desc_off)) > 0; offset = next) {
2389 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2390 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2391 sizeof(SDT_NOTE_NAME))) {
2392 /* Check the type of the note */
2393 if (nhdr.n_type != SDT_NOTE_TYPE)
2394 goto out_ret;
2395
2396 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2397 nhdr.n_descsz, sdt_notes);
2398 if (ret < 0)
2399 goto out_ret;
2400 }
2401 }
2402 if (list_empty(sdt_notes))
2403 ret = -ENOENT;
2404
2405out_ret:
2406 return ret;
2407}
2408
2409/**
2410 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2411 * @head : empty list_head
2412 * @target : file to find SDT notes from
2413 *
2414 * This opens the file, initializes the ELF descriptor
2415 * and then calls construct_sdt_notes_list().
2416 */
2417int get_sdt_note_list(struct list_head *head, const char *target)
2418{
2419 Elf *elf;
2420 int fd, ret;
2421
2422 fd = open(target, O_RDONLY);
2423 if (fd < 0)
2424 return -EBADF;
2425
2426 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2427 if (!elf) {
2428 ret = -EBADF;
2429 goto out_close;
2430 }
2431 ret = construct_sdt_notes_list(elf, head);
2432 elf_end(elf);
2433out_close:
2434 close(fd);
2435 return ret;
2436}
2437
2438/**
2439 * cleanup_sdt_note_list : free the sdt notes' list
2440 * @sdt_notes: sdt notes' list
2441 *
2442 * Free up the SDT notes in @sdt_notes.
2443 * Returns the number of SDT notes free'd.
2444 */
2445int cleanup_sdt_note_list(struct list_head *sdt_notes)
2446{
2447 struct sdt_note *tmp, *pos;
2448 int nr_free = 0;
2449
2450 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2451 list_del_init(&pos->note_list);
2452 zfree(&pos->args);
2453 zfree(&pos->name);
2454 zfree(&pos->provider);
2455 free(pos);
2456 nr_free++;
2457 }
2458 return nr_free;
2459}
2460
2461/**
2462 * sdt_notes__get_count: Counts the number of sdt events
2463 * @start: list_head to sdt_notes list
2464 *
2465 * Returns the number of SDT notes in a list
2466 */
2467int sdt_notes__get_count(struct list_head *start)
2468{
2469 struct sdt_note *sdt_ptr;
2470 int count = 0;
2471
2472 list_for_each_entry(sdt_ptr, start, note_list)
2473 count++;
2474 return count;
2475}
2476#endif
2477
2478void symbol__elf_init(void)
2479{
2480 elf_version(EV_CURRENT);
2481}