// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "annotate.h"
#include "build-id.h"
#include "cap.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
#include "namespaces.h"
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.nanosecs = false,
	.use_modules = true,
	.try_vmlinux_path = true,
	.demangle = true,
	.demangle_kernel = false,
	.cumulate_callchain = true,
	.time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers = true,
	.symfs = "",
	.event_group = true,
	.inline_name = true,
	.res_sample = 0,
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

static bool symbol_type__filter(char symbol_type)
{
	symbol_type = toupper(symbol_type);
	return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
{
	p->end = c->start;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

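/*
 * Pick the better of two symbols that start at the same address: prefer
 * non-zero length, then strong over weak binding, then global over local,
 * then fewer leading underscores, then the longer name; remaining ties fall
 * through to the arch-specific arch__choose_best_symbol().
 */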
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	return arch__choose_best_symbol(syma, symb);
}

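/*
 * kallsyms and symbol tables may list several aliases at the same address.
 * Unless aliases were explicitly allowed, walk the tree and keep only the
 * best symbol (per choose_best_symbol()) for each start address.
 */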
void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first_cached(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase_cached(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase_cached(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

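/*
 * Symbols loaded from kallsyms start out with zero length; give each such
 * symbol an end address by extending it up to the start of the following
 * symbol (the last one gets a page-aligned guess).
 */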
void symbols__fixup_end(struct rb_root_cached *symbols)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			arch__symbols__fixup_end(prev, curr);
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}

void map_groups__fixup_end(struct map_groups *mg)
{
	struct maps *maps = &mg->maps;
	struct map *next, *curr;

	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We don't have the actual symbols yet, so guess the final
	 * address of the last map.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}

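/*
 * Allocation layout: [priv area (symbol_conf.priv_size)][struct symbol][name].
 * The returned pointer is to the struct symbol, so per-symbol private data
 * (e.g. annotation state) lives *before* it and the name is stored inline
 * right after it; symbol__delete() undoes the offset before freeing.
 */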
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

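/*
 * Insert into an rbtree keyed by sym->start. For kernel symbols also flag
 * idle routines (see symbol__is_idle()), stripping the '.' that ppc64
 * function descriptors prepend before doing the name check.
 */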
void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and appends a '.' to the
		 * start of every instruction address. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_first_cached(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_last(&symbols->rb_root);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;
	bool leftmost = true;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}

static void symbols__sort_by_name(struct rb_root_cached *symbols,
				  struct rb_root_cached *source)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}

static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}

void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso->last_find_result.addr = 0;
	dso->last_find_result.symbol = NULL;
}

void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(&dso->symbols, sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result.addr >= sym->start &&
	    (dso->last_find_result.addr < sym->end ||
	    sym->start == sym->end)) {
		dso->last_find_result.symbol = sym;
	}
}

struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
		dso->last_find_result.addr = addr;
		dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
	}

	return dso->last_find_result.symbol;
}

struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(&dso->symbols);
}

struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(&dso->symbols);
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}

/*
 * Returns the first symbol that matches @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
{
	struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
						 SYMBOL_TAG_INCLUDE__NONE);
	if (!s)
		s = symbols__find_by_name(&dso->symbol_names, name,
					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
	return s;
}

void dso__sort_by_name(struct dso *dso)
{
	dso__set_sorted_by_name(dso);
	return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
}

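/*
 * Parse a /proc/modules style file. Each line looks like (fields beyond the
 * load address are ignored here):
 *
 *   ext4 737280 2 mbcache,jbd2, Live 0xffffffffc05f2000
 *
 * i.e. "name size refcount deps state address"; process_module() is called
 * with the bracketed "[name]", the load address and the size.
 */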
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	int i;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}

static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
 */
static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
				      struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module
					 * are contiguous in kallsyms, so
					 * curr_map points to a module and all
					 * its symbols are in its kmap. Mark
					 * it as loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = map_groups__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and its
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (curr_map != initial_map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = initial_map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != initial_map) {
			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols, pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != initial_map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

struct map *map_groups__first(struct map_groups *mg)
{
	return maps__first(&mg->maps);
}

static int do_validate_kcore_modules(const char *filename,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (!__map__is_kmodule(old_map)) {
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

/*
 * Merges map into map_groups by splitting the new map
 * within the existing map regions.
 */
int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
{
	struct map *old_map;
	LIST_HEAD(merged);

	for (old_map = map_groups__first(kmaps); old_map;
	     old_map = map_groups__next(old_map)) {

		/* no overlap with this one */
		if (new_map->end < old_map->start ||
		    new_map->start >= old_map->end)
			continue;

		if (new_map->start < old_map->start) {
			/*
			 * |new......
			 *       |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				new_map->end = old_map->start;
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map *m = map__clone(new_map);

				if (!m)
					return -ENOMEM;

				m->end = old_map->start;
				list_add_tail(&m->node, &merged);
				new_map->start = old_map->end;
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				new_map->start = old_map->end;
			}
		}
	}

	while (!list_empty(&merged)) {
		old_map = list_entry(merged.next, struct map, node);
		list_del_init(&old_map->node);
		map_groups__insert(kmaps, old_map);
		map__put(old_map);
	}

	if (new_map) {
		map_groups__insert(kmaps, new_map);
		map__put(new_map);
	}
	return 0;
}

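/*
 * Load the kernel DSO from /proc/kcore (or a saved copy next to kallsyms):
 * validate that kallsyms and modules still match the addresses recorded in
 * kcore, replace the old kernel maps with the program headers read from
 * kcore, and point the dso at kcore so object code can later be read via
 * dso__data_read_addr().
 */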
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		/*
		 * We need to preserve eBPF maps even if they are
		 * covered by kcore, because we need to access
		 * eBPF dso for source data.
		 */
		if (old_map != map && !__map__is_bpf_prog(old_map))
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		list_for_each_entry(new_map, &md.maps, node) {
			if (stext >= new_map->start && stext < new_map->end) {
				replacement_map = new_map;
				break;
			}
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
			map__put(new_map);
		} else {
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (map_groups__merge_in(kmaps, new_map))
				goto out_err;
		}
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->prot & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(&dso->symbols);
	symbols__fixup_duplicate(&dso->symbols);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}

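/*
 * Parse a perf map file (/tmp/perf-<pid>.map), the format JIT compilers use
 * to describe generated code. Each line is "<start> <size> <name>" with the
 * start address and size in hex, e.g.:
 *
 *   40b62000 24 jitted_function_foo
 */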
static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(&dso->symbols, sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__findnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

/* Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is in a separate mount namespace, check in
 * that namespace using the pid of the innermost pid namespace. If it's not in
 * a namespace, or the file can't be found there, try in the mount namespace
 * of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsi->need_setns) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nnsi->need_setns = false;
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}

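/*
 * Main symbol loading entry point for a DSO. Kernel DSOs are handled via
 * dso__load_kernel_sym()/dso__load_guest_kernel_sym() and perf map files via
 * dso__load_perf_map(); everything else walks the binary_type_symtab[]
 * candidates looking for one symsrc to read symbols from and one matching
 * the runtime image (for PLT synthesis), which may be the same file.
 */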
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);

		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso->adjust_symbols = 0;

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name)) {
		__symbol__join_symfs(name, PATH_MAX, dso->long_name);
		if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
			dso__set_build_id(dso, build_id);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	pthread_mutex_unlock(&dso->lock);
	nsinfo__mountns_exit(&nsc);

	return ret;
}

struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
{
	struct maps *maps = &mg->maps;
	struct map *map;
	struct rb_node *node;

	down_read(&maps->lock);

	for (node = maps->names.rb_node; node; ) {
		int rc;

		map = rb_entry(node, struct map, rb_node_name);

		rc = strcmp(map->dso->short_name, name);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto out_unlock;
	}

	map = NULL;

out_unlock:
	up_read(&maps->lock);
	return map;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
	int i, err = 0;
	char *filename = NULL;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
		if (err > 0)
			goto out;
	}

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			goto out;
		free(filename);
	}
out:
	return err;
}

static bool visible_dir_filter(const char *name, struct dirent *d)
{
	if (d->d_type != DT_DIR)
		return false;
	return lsdir_no_dot_filter(name, d);
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}

/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with the real UID/GID but open() uses the
 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);
	if (fd < 0)
		return false;
	close(fd);
	return true;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map doesn't have a pointer to groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest kallsyms file given by the user on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}

static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

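/*
 * kptr_restrict semantics (see Documentation/admin-guide/sysctl/kernel.rst):
 * 0 exposes kernel addresses, 1 hides them from unprivileged users, 2 hides
 * them from everyone. With CAP_SYSLOG only a value >= 2 restricts us;
 * without it any non-zero value does.
 */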
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = perf_cap__capable(CAP_SYSLOG) ?
				(atoi(line) >= 2) :
				(atoi(line) != 0);

		fclose(fp);
	}

	/* Per kernel/kallsyms.c:
	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
2228 pr_err("Annotation needs to be init before symbol__init()\n");
2229 return -1;
2230 }
2231
2232 symbol_conf.priv_size += sizeof(struct annotation);
2233 symbol_conf.init_annotation = true;
2234 return 0;
2235}
2236
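/*
 * One-time initialization: ELF support, the vmlinux path table, the
 * dso/comm/pid/tid/symbol filter lists and the kptr_restrict setting.
 */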
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is not a valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to "",
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

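/* Undo symbol__init(): delete the filter lists and the vmlinux path table. */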
void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

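/* parse_options() callback to set up an alternate symbol filesystem root. */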
int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/*
	 * Skip the locally configured cache if a symfs is given, and
	 * configure the buildid dir to symfs/.debug.
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

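/* mem_info and block_info are plain reference counted heap objects. */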
struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (mi)
		refcount_inc(&mi->refcnt);
	return mi;
}

void mem_info__put(struct mem_info *mi)
{
	if (mi && refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi)
		refcount_set(&mi->refcnt, 1);
	return mi;
}

struct block_info *block_info__get(struct block_info *bi)
{
	if (bi)
		refcount_inc(&bi->refcnt);
	return bi;
}

void block_info__put(struct block_info *bi)
{
	if (bi && refcount_dec_and_test(&bi->refcnt))
		free(bi);
}

struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi)
		refcount_set(&bi->refcnt, 1);
	return bi;
}
1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <stdlib.h>
5#include <stdio.h>
6#include <string.h>
7#include <linux/capability.h>
8#include <linux/kernel.h>
9#include <linux/mman.h>
10#include <linux/string.h>
11#include <linux/time64.h>
12#include <sys/types.h>
13#include <sys/stat.h>
14#include <sys/param.h>
15#include <fcntl.h>
16#include <unistd.h>
17#include <inttypes.h>
18#include "annotate.h"
19#include "build-id.h"
20#include "cap.h"
21#include "dso.h"
22#include "util.h" // lsdir()
23#include "debug.h"
24#include "event.h"
25#include "machine.h"
26#include "map.h"
27#include "symbol.h"
28#include "map_symbol.h"
29#include "mem-events.h"
30#include "symsrc.h"
31#include "strlist.h"
32#include "intlist.h"
33#include "namespaces.h"
34#include "header.h"
35#include "path.h"
36#include <linux/ctype.h>
37#include <linux/zalloc.h>
38
39#include <elf.h>
40#include <limits.h>
41#include <symbol/kallsyms.h>
42#include <sys/utsname.h>
43
44static int dso__load_kernel_sym(struct dso *dso, struct map *map);
45static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
46static bool symbol__is_idle(const char *name);
47
48int vmlinux_path__nr_entries;
49char **vmlinux_path;
50
51struct symbol_conf symbol_conf = {
52 .nanosecs = false,
53 .use_modules = true,
54 .try_vmlinux_path = true,
55 .demangle = true,
56 .demangle_kernel = false,
57 .cumulate_callchain = true,
58 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
59 .show_hist_headers = true,
60 .symfs = "",
61 .event_group = true,
62 .inline_name = true,
63 .res_sample = 0,
64};
65
66static enum dso_binary_type binary_type_symtab[] = {
67 DSO_BINARY_TYPE__KALLSYMS,
68 DSO_BINARY_TYPE__GUEST_KALLSYMS,
69 DSO_BINARY_TYPE__JAVA_JIT,
70 DSO_BINARY_TYPE__DEBUGLINK,
71 DSO_BINARY_TYPE__BUILD_ID_CACHE,
72 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
73 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
74 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
75 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
76 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
77 DSO_BINARY_TYPE__GUEST_KMODULE,
78 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
79 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
80 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
81 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
82 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
83 DSO_BINARY_TYPE__NOT_FOUND,
84};
85
86#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
87
88static bool symbol_type__filter(char symbol_type)
89{
90 symbol_type = toupper(symbol_type);
91 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
92}
93
94static int prefix_underscores_count(const char *str)
95{
96 const char *tail = str;
97
98 while (*tail == '_')
99 tail++;
100
101 return tail - str;
102}
103
104void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
105{
106 p->end = c->start;
107}
108
109const char * __weak arch__normalize_symbol_name(const char *name)
110{
111 return name;
112}
113
114int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
115{
116 return strcmp(namea, nameb);
117}
118
119int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
120 unsigned int n)
121{
122 return strncmp(namea, nameb, n);
123}
124
125int __weak arch__choose_best_symbol(struct symbol *syma,
126 struct symbol *symb __maybe_unused)
127{
128 /* Avoid "SyS" kernel syscall aliases */
129 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
130 return SYMBOL_B;
131 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
132 return SYMBOL_B;
133
134 return SYMBOL_A;
135}
136
137static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
138{
139 s64 a;
140 s64 b;
141 size_t na, nb;
142
143 /* Prefer a symbol with non zero length */
144 a = syma->end - syma->start;
145 b = symb->end - symb->start;
146 if ((b == 0) && (a > 0))
147 return SYMBOL_A;
148 else if ((a == 0) && (b > 0))
149 return SYMBOL_B;
150
151 /* Prefer a non weak symbol over a weak one */
152 a = syma->binding == STB_WEAK;
153 b = symb->binding == STB_WEAK;
154 if (b && !a)
155 return SYMBOL_A;
156 if (a && !b)
157 return SYMBOL_B;
158
159 /* Prefer a global symbol over a non global one */
160 a = syma->binding == STB_GLOBAL;
161 b = symb->binding == STB_GLOBAL;
162 if (a && !b)
163 return SYMBOL_A;
164 if (b && !a)
165 return SYMBOL_B;
166
167 /* Prefer a symbol with less underscores */
168 a = prefix_underscores_count(syma->name);
169 b = prefix_underscores_count(symb->name);
170 if (b > a)
171 return SYMBOL_A;
172 else if (a > b)
173 return SYMBOL_B;
174
175 /* Choose the symbol with the longest name */
176 na = strlen(syma->name);
177 nb = strlen(symb->name);
178 if (na > nb)
179 return SYMBOL_A;
180 else if (na < nb)
181 return SYMBOL_B;
182
183 return arch__choose_best_symbol(syma, symb);
184}
185
186void symbols__fixup_duplicate(struct rb_root_cached *symbols)
187{
188 struct rb_node *nd;
189 struct symbol *curr, *next;
190
191 if (symbol_conf.allow_aliases)
192 return;
193
194 nd = rb_first_cached(symbols);
195
196 while (nd) {
197 curr = rb_entry(nd, struct symbol, rb_node);
198again:
199 nd = rb_next(&curr->rb_node);
200 next = rb_entry(nd, struct symbol, rb_node);
201
202 if (!nd)
203 break;
204
205 if (curr->start != next->start)
206 continue;
207
208 if (choose_best_symbol(curr, next) == SYMBOL_A) {
209 rb_erase_cached(&next->rb_node, symbols);
210 symbol__delete(next);
211 goto again;
212 } else {
213 nd = rb_next(&curr->rb_node);
214 rb_erase_cached(&curr->rb_node, symbols);
215 symbol__delete(curr);
216 }
217 }
218}
219
220void symbols__fixup_end(struct rb_root_cached *symbols)
221{
222 struct rb_node *nd, *prevnd = rb_first_cached(symbols);
223 struct symbol *curr, *prev;
224
225 if (prevnd == NULL)
226 return;
227
228 curr = rb_entry(prevnd, struct symbol, rb_node);
229
230 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
231 prev = curr;
232 curr = rb_entry(nd, struct symbol, rb_node);
233
234 if (prev->end == prev->start && prev->end != curr->start)
235 arch__symbols__fixup_end(prev, curr);
236 }
237
238 /* Last entry */
239 if (curr->end == curr->start)
240 curr->end = roundup(curr->start, 4096) + 4096;
241}
242
243void maps__fixup_end(struct maps *maps)
244{
245 struct map *prev = NULL, *curr;
246
247 down_write(&maps->lock);
248
249 maps__for_each_entry(maps, curr) {
250 if (prev != NULL && !prev->end)
251 prev->end = curr->start;
252
253 prev = curr;
254 }
255
256 /*
257 * We still haven't the actual symbols, so guess the
258 * last map final address.
259 */
260 if (curr && !curr->end)
261 curr->end = ~0ULL;
262
263 up_write(&maps->lock);
264}
265
266struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
267{
268 size_t namelen = strlen(name) + 1;
269 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
270 sizeof(*sym) + namelen));
271 if (sym == NULL)
272 return NULL;
273
274 if (symbol_conf.priv_size) {
275 if (symbol_conf.init_annotation) {
276 struct annotation *notes = (void *)sym;
277 pthread_mutex_init(¬es->lock, NULL);
278 }
279 sym = ((void *)sym) + symbol_conf.priv_size;
280 }
281
282 sym->start = start;
283 sym->end = len ? start + len : start;
284 sym->type = type;
285 sym->binding = binding;
286 sym->namelen = namelen - 1;
287
288 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
289 __func__, name, start, sym->end);
290 memcpy(sym->name, name, namelen);
291
292 return sym;
293}
294
295void symbol__delete(struct symbol *sym)
296{
297 free(((void *)sym) - symbol_conf.priv_size);
298}
299
300void symbols__delete(struct rb_root_cached *symbols)
301{
302 struct symbol *pos;
303 struct rb_node *next = rb_first_cached(symbols);
304
305 while (next) {
306 pos = rb_entry(next, struct symbol, rb_node);
307 next = rb_next(&pos->rb_node);
308 rb_erase_cached(&pos->rb_node, symbols);
309 symbol__delete(pos);
310 }
311}
312
313void __symbols__insert(struct rb_root_cached *symbols,
314 struct symbol *sym, bool kernel)
315{
316 struct rb_node **p = &symbols->rb_root.rb_node;
317 struct rb_node *parent = NULL;
318 const u64 ip = sym->start;
319 struct symbol *s;
320 bool leftmost = true;
321
322 if (kernel) {
323 const char *name = sym->name;
324 /*
325 * ppc64 uses function descriptors and appends a '.' to the
326 * start of every instruction address. Remove it.
327 */
328 if (name[0] == '.')
329 name++;
330 sym->idle = symbol__is_idle(name);
331 }
332
333 while (*p != NULL) {
334 parent = *p;
335 s = rb_entry(parent, struct symbol, rb_node);
336 if (ip < s->start)
337 p = &(*p)->rb_left;
338 else {
339 p = &(*p)->rb_right;
340 leftmost = false;
341 }
342 }
343 rb_link_node(&sym->rb_node, parent, p);
344 rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
345}
346
347void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
348{
349 __symbols__insert(symbols, sym, false);
350}
351
352static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
353{
354 struct rb_node *n;
355
356 if (symbols == NULL)
357 return NULL;
358
359 n = symbols->rb_root.rb_node;
360
361 while (n) {
362 struct symbol *s = rb_entry(n, struct symbol, rb_node);
363
364 if (ip < s->start)
365 n = n->rb_left;
366 else if (ip > s->end || (ip == s->end && ip != s->start))
367 n = n->rb_right;
368 else
369 return s;
370 }
371
372 return NULL;
373}
374
375static struct symbol *symbols__first(struct rb_root_cached *symbols)
376{
377 struct rb_node *n = rb_first_cached(symbols);
378
379 if (n)
380 return rb_entry(n, struct symbol, rb_node);
381
382 return NULL;
383}
384
385static struct symbol *symbols__last(struct rb_root_cached *symbols)
386{
387 struct rb_node *n = rb_last(&symbols->rb_root);
388
389 if (n)
390 return rb_entry(n, struct symbol, rb_node);
391
392 return NULL;
393}
394
395static struct symbol *symbols__next(struct symbol *sym)
396{
397 struct rb_node *n = rb_next(&sym->rb_node);
398
399 if (n)
400 return rb_entry(n, struct symbol, rb_node);
401
402 return NULL;
403}
404
405static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
406{
407 struct rb_node **p = &symbols->rb_root.rb_node;
408 struct rb_node *parent = NULL;
409 struct symbol_name_rb_node *symn, *s;
410 bool leftmost = true;
411
412 symn = container_of(sym, struct symbol_name_rb_node, sym);
413
414 while (*p != NULL) {
415 parent = *p;
416 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
417 if (strcmp(sym->name, s->sym.name) < 0)
418 p = &(*p)->rb_left;
419 else {
420 p = &(*p)->rb_right;
421 leftmost = false;
422 }
423 }
424 rb_link_node(&symn->rb_node, parent, p);
425 rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
426}
427
428static void symbols__sort_by_name(struct rb_root_cached *symbols,
429 struct rb_root_cached *source)
430{
431 struct rb_node *nd;
432
433 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
434 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
435 symbols__insert_by_name(symbols, pos);
436 }
437}
438
439int symbol__match_symbol_name(const char *name, const char *str,
440 enum symbol_tag_include includes)
441{
442 const char *versioning;
443
444 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
445 (versioning = strstr(name, "@@"))) {
446 int len = strlen(str);
447
448 if (len < versioning - name)
449 len = versioning - name;
450
451 return arch__compare_symbol_names_n(name, str, len);
452 } else
453 return arch__compare_symbol_names(name, str);
454}
455
456static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
457 const char *name,
458 enum symbol_tag_include includes)
459{
460 struct rb_node *n;
461 struct symbol_name_rb_node *s = NULL;
462
463 if (symbols == NULL)
464 return NULL;
465
466 n = symbols->rb_root.rb_node;
467
468 while (n) {
469 int cmp;
470
471 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
472 cmp = symbol__match_symbol_name(s->sym.name, name, includes);
473
474 if (cmp > 0)
475 n = n->rb_left;
476 else if (cmp < 0)
477 n = n->rb_right;
478 else
479 break;
480 }
481
482 if (n == NULL)
483 return NULL;
484
485 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
486 /* return first symbol that has same name (if any) */
487 for (n = rb_prev(n); n; n = rb_prev(n)) {
488 struct symbol_name_rb_node *tmp;
489
490 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
491 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
492 break;
493
494 s = tmp;
495 }
496
497 return &s->sym;
498}
499
500void dso__reset_find_symbol_cache(struct dso *dso)
501{
502 dso->last_find_result.addr = 0;
503 dso->last_find_result.symbol = NULL;
504}
505
506void dso__insert_symbol(struct dso *dso, struct symbol *sym)
507{
508 __symbols__insert(&dso->symbols, sym, dso->kernel);
509
510 /* update the symbol cache if necessary */
511 if (dso->last_find_result.addr >= sym->start &&
512 (dso->last_find_result.addr < sym->end ||
513 sym->start == sym->end)) {
514 dso->last_find_result.symbol = sym;
515 }
516}
517
518struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
519{
520 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
521 dso->last_find_result.addr = addr;
522 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
523 }
524
525 return dso->last_find_result.symbol;
526}
527
528struct symbol *dso__first_symbol(struct dso *dso)
529{
530 return symbols__first(&dso->symbols);
531}
532
533struct symbol *dso__last_symbol(struct dso *dso)
534{
535 return symbols__last(&dso->symbols);
536}
537
538struct symbol *dso__next_symbol(struct symbol *sym)
539{
540 return symbols__next(sym);
541}
542
543struct symbol *symbol__next_by_name(struct symbol *sym)
544{
545 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
546 struct rb_node *n = rb_next(&s->rb_node);
547
548 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
549}
550
551 /*
552 * Returns first symbol that matched with @name.
553 */
554struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
555{
556 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
557 SYMBOL_TAG_INCLUDE__NONE);
558 if (!s)
559 s = symbols__find_by_name(&dso->symbol_names, name,
560 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
561 return s;
562}
563
564void dso__sort_by_name(struct dso *dso)
565{
566 dso__set_sorted_by_name(dso);
567 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
568}
569
570/*
571 * While we find nice hex chars, build a long_val.
572 * Return number of chars processed.
573 */
574static int hex2u64(const char *ptr, u64 *long_val)
575{
576 char *p;
577
578 *long_val = strtoull(ptr, &p, 16);
579
580 return p - ptr;
581}
582
583
584int modules__parse(const char *filename, void *arg,
585 int (*process_module)(void *arg, const char *name,
586 u64 start, u64 size))
587{
588 char *line = NULL;
589 size_t n;
590 FILE *file;
591 int err = 0;
592
593 file = fopen(filename, "r");
594 if (file == NULL)
595 return -1;
596
597 while (1) {
598 char name[PATH_MAX];
599 u64 start, size;
600 char *sep, *endptr;
601 ssize_t line_len;
602
603 line_len = getline(&line, &n, file);
604 if (line_len < 0) {
605 if (feof(file))
606 break;
607 err = -1;
608 goto out;
609 }
610
611 if (!line) {
612 err = -1;
613 goto out;
614 }
615
616 line[--line_len] = '\0'; /* \n */
617
618 sep = strrchr(line, 'x');
619 if (sep == NULL)
620 continue;
621
622 hex2u64(sep + 1, &start);
623
624 sep = strchr(line, ' ');
625 if (sep == NULL)
626 continue;
627
628 *sep = '\0';
629
630 scnprintf(name, sizeof(name), "[%s]", line);
631
632 size = strtoul(sep + 1, &endptr, 0);
633 if (*endptr != ' ' && *endptr != '\t')
634 continue;
635
636 err = process_module(arg, name, start, size);
637 if (err)
638 break;
639 }
640out:
641 free(line);
642 fclose(file);
643 return err;
644}
645
646/*
647 * These are symbols in the kernel image, so make sure that
648 * sym is from a kernel DSO.
649 */
650static bool symbol__is_idle(const char *name)
651{
652 const char * const idle_symbols[] = {
653 "acpi_idle_do_entry",
654 "acpi_processor_ffh_cstate_enter",
655 "arch_cpu_idle",
656 "cpu_idle",
657 "cpu_startup_entry",
658 "idle_cpu",
659 "intel_idle",
660 "default_idle",
661 "native_safe_halt",
662 "enter_idle",
663 "exit_idle",
664 "mwait_idle",
665 "mwait_idle_with_hints",
666 "mwait_idle_with_hints.constprop.0",
667 "poll_idle",
668 "ppc64_runlatch_off",
669 "pseries_dedicated_idle_sleep",
670 "psw_idle",
671 "psw_idle_exit",
672 NULL
673 };
674 int i;
675 static struct strlist *idle_symbols_list;
676
677 if (idle_symbols_list)
678 return strlist__has_entry(idle_symbols_list, name);
679
680 idle_symbols_list = strlist__new(NULL, NULL);
681
682 for (i = 0; idle_symbols[i]; i++)
683 strlist__add(idle_symbols_list, idle_symbols[i]);
684
685 return strlist__has_entry(idle_symbols_list, name);
686}
687
688static int map__process_kallsym_symbol(void *arg, const char *name,
689 char type, u64 start)
690{
691 struct symbol *sym;
692 struct dso *dso = arg;
693 struct rb_root_cached *root = &dso->symbols;
694
695 if (!symbol_type__filter(type))
696 return 0;
697
698 /*
699 * module symbols are not sorted so we add all
700 * symbols, setting length to 0, and rely on
701 * symbols__fixup_end() to fix it up.
702 */
703 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
704 if (sym == NULL)
705 return -ENOMEM;
706 /*
707 * We will pass the symbols to the filter later, in
708 * map__split_kallsyms, when we have split the maps per module
709 */
710 __symbols__insert(root, sym, !strchr(name, '['));
711
712 return 0;
713}
714
715/*
716 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
717 * so that we can in the next step set the symbol ->end address and then
718 * call kernel_maps__split_kallsyms.
719 */
720static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
721{
722 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
723}
724
725static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
726{
727 struct map *curr_map;
728 struct symbol *pos;
729 int count = 0;
730 struct rb_root_cached old_root = dso->symbols;
731 struct rb_root_cached *root = &dso->symbols;
732 struct rb_node *next = rb_first_cached(root);
733
734 if (!kmaps)
735 return -1;
736
737 *root = RB_ROOT_CACHED;
738
739 while (next) {
740 char *module;
741
742 pos = rb_entry(next, struct symbol, rb_node);
743 next = rb_next(&pos->rb_node);
744
745 rb_erase_cached(&pos->rb_node, &old_root);
746 RB_CLEAR_NODE(&pos->rb_node);
747 module = strchr(pos->name, '\t');
748 if (module)
749 *module = '\0';
750
751 curr_map = maps__find(kmaps, pos->start);
752
753 if (!curr_map) {
754 symbol__delete(pos);
755 continue;
756 }
757
758 pos->start -= curr_map->start - curr_map->pgoff;
759 if (pos->end > curr_map->end)
760 pos->end = curr_map->end;
761 if (pos->end)
762 pos->end -= curr_map->start - curr_map->pgoff;
763 symbols__insert(&curr_map->dso->symbols, pos);
764 ++count;
765 }
766
767 /* Symbols have been adjusted */
768 dso->adjust_symbols = 1;
769
770 return count;
771}
772
773/*
774 * Split the symbols into maps, making sure there are no overlaps, i.e. the
775 * kernel range is broken in several maps, named [kernel].N, as we don't have
776 * the original ELF section names vmlinux have.
777 */
778static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
779 struct map *initial_map)
780{
781 struct machine *machine;
782 struct map *curr_map = initial_map;
783 struct symbol *pos;
784 int count = 0, moved = 0;
785 struct rb_root_cached *root = &dso->symbols;
786 struct rb_node *next = rb_first_cached(root);
787 int kernel_range = 0;
788 bool x86_64;
789
790 if (!kmaps)
791 return -1;
792
793 machine = kmaps->machine;
794
795 x86_64 = machine__is(machine, "x86_64");
796
797 while (next) {
798 char *module;
799
800 pos = rb_entry(next, struct symbol, rb_node);
801 next = rb_next(&pos->rb_node);
802
803 module = strchr(pos->name, '\t');
804 if (module) {
805 if (!symbol_conf.use_modules)
806 goto discard_symbol;
807
808 *module++ = '\0';
809
810 if (strcmp(curr_map->dso->short_name, module)) {
811 if (curr_map != initial_map &&
812 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
813 machine__is_default_guest(machine)) {
814 /*
815 * We assume all symbols of a module are
816 * continuous in * kallsyms, so curr_map
817 * points to a module and all its
818 * symbols are in its kmap. Mark it as
819 * loaded.
820 */
821 dso__set_loaded(curr_map->dso);
822 }
823
824 curr_map = maps__find_by_name(kmaps, module);
825 if (curr_map == NULL) {
826 pr_debug("%s/proc/{kallsyms,modules} "
827 "inconsistency while looking "
828 "for \"%s\" module!\n",
829 machine->root_dir, module);
830 curr_map = initial_map;
831 goto discard_symbol;
832 }
833
834 if (curr_map->dso->loaded &&
835 !machine__is_default_guest(machine))
836 goto discard_symbol;
837 }
838 /*
839 * So that we look just like we get from .ko files,
840 * i.e. not prelinked, relative to initial_map->start.
841 */
842 pos->start = curr_map->map_ip(curr_map, pos->start);
843 pos->end = curr_map->map_ip(curr_map, pos->end);
844 } else if (x86_64 && is_entry_trampoline(pos->name)) {
845 /*
846 * These symbols are not needed anymore since the
847 * trampoline maps refer to the text section and it's
848 * symbols instead. Avoid having to deal with
849 * relocations, and the assumption that the first symbol
850 * is the start of kernel text, by simply removing the
851 * symbols at this point.
852 */
853 goto discard_symbol;
854 } else if (curr_map != initial_map) {
855 char dso_name[PATH_MAX];
856 struct dso *ndso;
857
858 if (delta) {
859 /* Kernel was relocated at boot time */
860 pos->start -= delta;
861 pos->end -= delta;
862 }
863
864 if (count == 0) {
865 curr_map = initial_map;
866 goto add_symbol;
867 }
868
869 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
870 snprintf(dso_name, sizeof(dso_name),
871 "[guest.kernel].%d",
872 kernel_range++);
873 else
874 snprintf(dso_name, sizeof(dso_name),
875 "[kernel].%d",
876 kernel_range++);
877
878 ndso = dso__new(dso_name);
879 if (ndso == NULL)
880 return -1;
881
882 ndso->kernel = dso->kernel;
883
884 curr_map = map__new2(pos->start, ndso);
885 if (curr_map == NULL) {
886 dso__put(ndso);
887 return -1;
888 }
889
890 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
891 maps__insert(kmaps, curr_map);
892 ++kernel_range;
893 } else if (delta) {
894 /* Kernel was relocated at boot time */
895 pos->start -= delta;
896 pos->end -= delta;
897 }
898add_symbol:
899 if (curr_map != initial_map) {
900 rb_erase_cached(&pos->rb_node, root);
901 symbols__insert(&curr_map->dso->symbols, pos);
902 ++moved;
903 } else
904 ++count;
905
906 continue;
907discard_symbol:
908 rb_erase_cached(&pos->rb_node, root);
909 symbol__delete(pos);
910 }
911
912 if (curr_map != initial_map &&
913 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
914 machine__is_default_guest(kmaps->machine)) {
915 dso__set_loaded(curr_map->dso);
916 }
917
918 return count + moved;
919}
920
921bool symbol__restricted_filename(const char *filename,
922 const char *restricted_filename)
923{
924 bool restricted = false;
925
926 if (symbol_conf.kptr_restrict) {
927 char *r = realpath(filename, NULL);
928
929 if (r != NULL) {
930 restricted = strcmp(r, restricted_filename) == 0;
931 free(r);
932 return restricted;
933 }
934 }
935
936 return restricted;
937}
938
939struct module_info {
940 struct rb_node rb_node;
941 char *name;
942 u64 start;
943};
944
945static void add_module(struct module_info *mi, struct rb_root *modules)
946{
947 struct rb_node **p = &modules->rb_node;
948 struct rb_node *parent = NULL;
949 struct module_info *m;
950
951 while (*p != NULL) {
952 parent = *p;
953 m = rb_entry(parent, struct module_info, rb_node);
954 if (strcmp(mi->name, m->name) < 0)
955 p = &(*p)->rb_left;
956 else
957 p = &(*p)->rb_right;
958 }
959 rb_link_node(&mi->rb_node, parent, p);
960 rb_insert_color(&mi->rb_node, modules);
961}
962
963static void delete_modules(struct rb_root *modules)
964{
965 struct module_info *mi;
966 struct rb_node *next = rb_first(modules);
967
968 while (next) {
969 mi = rb_entry(next, struct module_info, rb_node);
970 next = rb_next(&mi->rb_node);
971 rb_erase(&mi->rb_node, modules);
972 zfree(&mi->name);
973 free(mi);
974 }
975}
976
977static struct module_info *find_module(const char *name,
978 struct rb_root *modules)
979{
980 struct rb_node *n = modules->rb_node;
981
982 while (n) {
983 struct module_info *m;
984 int cmp;
985
986 m = rb_entry(n, struct module_info, rb_node);
987 cmp = strcmp(name, m->name);
988 if (cmp < 0)
989 n = n->rb_left;
990 else if (cmp > 0)
991 n = n->rb_right;
992 else
993 return m;
994 }
995
996 return NULL;
997}
998
999static int __read_proc_modules(void *arg, const char *name, u64 start,
1000 u64 size __maybe_unused)
1001{
1002 struct rb_root *modules = arg;
1003 struct module_info *mi;
1004
1005 mi = zalloc(sizeof(struct module_info));
1006 if (!mi)
1007 return -ENOMEM;
1008
1009 mi->name = strdup(name);
1010 mi->start = start;
1011
1012 if (!mi->name) {
1013 free(mi);
1014 return -ENOMEM;
1015 }
1016
1017 add_module(mi, modules);
1018
1019 return 0;
1020}
1021
1022static int read_proc_modules(const char *filename, struct rb_root *modules)
1023{
1024 if (symbol__restricted_filename(filename, "/proc/modules"))
1025 return -1;
1026
1027 if (modules__parse(filename, modules, __read_proc_modules)) {
1028 delete_modules(modules);
1029 return -1;
1030 }
1031
1032 return 0;
1033}
1034
1035int compare_proc_modules(const char *from, const char *to)
1036{
1037 struct rb_root from_modules = RB_ROOT;
1038 struct rb_root to_modules = RB_ROOT;
1039 struct rb_node *from_node, *to_node;
1040 struct module_info *from_m, *to_m;
1041 int ret = -1;
1042
1043 if (read_proc_modules(from, &from_modules))
1044 return -1;
1045
1046 if (read_proc_modules(to, &to_modules))
1047 goto out_delete_from;
1048
1049 from_node = rb_first(&from_modules);
1050 to_node = rb_first(&to_modules);
1051 while (from_node) {
1052 if (!to_node)
1053 break;
1054
1055 from_m = rb_entry(from_node, struct module_info, rb_node);
1056 to_m = rb_entry(to_node, struct module_info, rb_node);
1057
1058 if (from_m->start != to_m->start ||
1059 strcmp(from_m->name, to_m->name))
1060 break;
1061
1062 from_node = rb_next(from_node);
1063 to_node = rb_next(to_node);
1064 }
1065
1066 if (!from_node && !to_node)
1067 ret = 0;
1068
1069 delete_modules(&to_modules);
1070out_delete_from:
1071 delete_modules(&from_modules);
1072
1073 return ret;
1074}
1075
1076static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
1077{
1078 struct rb_root modules = RB_ROOT;
1079 struct map *old_map;
1080 int err;
1081
1082 err = read_proc_modules(filename, &modules);
1083 if (err)
1084 return err;
1085
1086 maps__for_each_entry(kmaps, old_map) {
1087 struct module_info *mi;
1088
1089 if (!__map__is_kmodule(old_map)) {
1090 continue;
1091 }
1092
1093 /* Module must be in memory at the same address */
1094 mi = find_module(old_map->dso->short_name, &modules);
1095 if (!mi || mi->start != old_map->start) {
1096 err = -EINVAL;
1097 goto out;
1098 }
1099 }
1100out:
1101 delete_modules(&modules);
1102 return err;
1103}
1104
1105/*
1106 * If kallsyms is referenced by name then we look for filename in the same
1107 * directory.
1108 */
1109static bool filename_from_kallsyms_filename(char *filename,
1110 const char *base_name,
1111 const char *kallsyms_filename)
1112{
1113 char *name;
1114
1115 strcpy(filename, kallsyms_filename);
1116 name = strrchr(filename, '/');
1117 if (!name)
1118 return false;
1119
1120 name += 1;
1121
1122 if (!strcmp(name, "kallsyms")) {
1123 strcpy(name, base_name);
1124 return true;
1125 }
1126
1127 return false;
1128}
1129
1130static int validate_kcore_modules(const char *kallsyms_filename,
1131 struct map *map)
1132{
1133 struct maps *kmaps = map__kmaps(map);
1134 char modules_filename[PATH_MAX];
1135
1136 if (!kmaps)
1137 return -EINVAL;
1138
1139 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1140 kallsyms_filename))
1141 return -EINVAL;
1142
1143 if (do_validate_kcore_modules(modules_filename, kmaps))
1144 return -EINVAL;
1145
1146 return 0;
1147}
1148
1149static int validate_kcore_addresses(const char *kallsyms_filename,
1150 struct map *map)
1151{
1152 struct kmap *kmap = map__kmap(map);
1153
1154 if (!kmap)
1155 return -EINVAL;
1156
1157 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1158 u64 start;
1159
1160 if (kallsyms__get_function_start(kallsyms_filename,
1161 kmap->ref_reloc_sym->name, &start))
1162 return -ENOENT;
1163 if (start != kmap->ref_reloc_sym->addr)
1164 return -EINVAL;
1165 }
1166
1167 return validate_kcore_modules(kallsyms_filename, map);
1168}
1169
1170struct kcore_mapfn_data {
1171 struct dso *dso;
1172 struct list_head maps;
1173};
1174
1175static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1176{
1177 struct kcore_mapfn_data *md = data;
1178 struct map *map;
1179
1180 map = map__new2(start, md->dso);
1181 if (map == NULL)
1182 return -ENOMEM;
1183
1184 map->end = map->start + len;
1185 map->pgoff = pgoff;
1186
1187 list_add(&map->node, &md->maps);
1188
1189 return 0;
1190}
1191
1192/*
1193 * Merges map into maps by splitting the new map within the existing map
1194 * regions.
1195 */
1196int maps__merge_in(struct maps *kmaps, struct map *new_map)
1197{
1198 struct map *old_map;
1199 LIST_HEAD(merged);
1200
1201 maps__for_each_entry(kmaps, old_map) {
1202 /* no overload with this one */
1203 if (new_map->end < old_map->start ||
1204 new_map->start >= old_map->end)
1205 continue;
1206
1207 if (new_map->start < old_map->start) {
1208 /*
1209 * |new......
1210 * |old....
1211 */
1212 if (new_map->end < old_map->end) {
1213 /*
1214 * |new......| -> |new..|
1215 * |old....| -> |old....|
1216 */
1217 new_map->end = old_map->start;
1218 } else {
1219 /*
1220 * |new.............| -> |new..| |new..|
1221 * |old....| -> |old....|
1222 */
1223 struct map *m = map__clone(new_map);
1224
1225 if (!m)
1226 return -ENOMEM;
1227
1228 m->end = old_map->start;
1229 list_add_tail(&m->node, &merged);
1230 new_map->pgoff += old_map->end - new_map->start;
1231 new_map->start = old_map->end;
1232 }
1233 } else {
1234 /*
1235 * |new......
1236 * |old....
1237 */
1238 if (new_map->end < old_map->end) {
1239 /*
1240 * |new..| -> x
1241 * |old.........| -> |old.........|
1242 */
1243 map__put(new_map);
1244 new_map = NULL;
1245 break;
1246 } else {
1247 /*
1248 * |new......| -> |new...|
1249 * |old....| -> |old....|
1250 */
1251 new_map->pgoff += old_map->end - new_map->start;
1252 new_map->start = old_map->end;
1253 }
1254 }
1255 }
1256
1257 while (!list_empty(&merged)) {
1258 old_map = list_entry(merged.next, struct map, node);
1259 list_del_init(&old_map->node);
1260 maps__insert(kmaps, old_map);
1261 map__put(old_map);
1262 }
1263
1264 if (new_map) {
1265 maps__insert(kmaps, new_map);
1266 map__put(new_map);
1267 }
1268 return 0;
1269}
1270
1271static int dso__load_kcore(struct dso *dso, struct map *map,
1272 const char *kallsyms_filename)
1273{
1274 struct maps *kmaps = map__kmaps(map);
1275 struct kcore_mapfn_data md;
1276 struct map *old_map, *new_map, *replacement_map = NULL, *next;
1277 struct machine *machine;
1278 bool is_64_bit;
1279 int err, fd;
1280 char kcore_filename[PATH_MAX];
1281 u64 stext;
1282
1283 if (!kmaps)
1284 return -EINVAL;
1285
1286 machine = kmaps->machine;
1287
1288 /* This function requires that the map is the kernel map */
1289 if (!__map__is_kernel(map))
1290 return -EINVAL;
1291
1292 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1293 kallsyms_filename))
1294 return -EINVAL;
1295
1296 /* Modules and kernel must be present at their original addresses */
1297 if (validate_kcore_addresses(kallsyms_filename, map))
1298 return -EINVAL;
1299
1300 md.dso = dso;
1301 INIT_LIST_HEAD(&md.maps);
1302
1303 fd = open(kcore_filename, O_RDONLY);
1304 if (fd < 0) {
1305 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1306 kcore_filename);
1307 return -EINVAL;
1308 }
1309
1310 /* Read new maps into temporary lists */
1311 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1312 &is_64_bit);
1313 if (err)
1314 goto out_err;
1315 dso->is_64_bit = is_64_bit;
1316
1317 if (list_empty(&md.maps)) {
1318 err = -EINVAL;
1319 goto out_err;
1320 }
1321
1322 /* Remove old maps */
1323 maps__for_each_entry_safe(kmaps, old_map, next) {
1324 /*
1325 * We need to preserve eBPF maps even if they are
1326 * covered by kcore, because we need to access
1327 * eBPF dso for source data.
1328 */
1329 if (old_map != map && !__map__is_bpf_prog(old_map))
1330 maps__remove(kmaps, old_map);
1331 }
1332 machine->trampolines_mapped = false;
1333
1334 /* Find the kernel map using the '_stext' symbol */
1335 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1336 list_for_each_entry(new_map, &md.maps, node) {
1337 if (stext >= new_map->start && stext < new_map->end) {
1338 replacement_map = new_map;
1339 break;
1340 }
1341 }
1342 }
1343
1344 if (!replacement_map)
1345 replacement_map = list_entry(md.maps.next, struct map, node);
1346
1347 /* Add new maps */
1348 while (!list_empty(&md.maps)) {
1349 new_map = list_entry(md.maps.next, struct map, node);
1350 list_del_init(&new_map->node);
1351 if (new_map == replacement_map) {
1352 map->start = new_map->start;
1353 map->end = new_map->end;
1354 map->pgoff = new_map->pgoff;
1355 map->map_ip = new_map->map_ip;
1356 map->unmap_ip = new_map->unmap_ip;
1357 /* Ensure maps are correctly ordered */
1358 map__get(map);
1359 maps__remove(kmaps, map);
1360 maps__insert(kmaps, map);
1361 map__put(map);
1362 map__put(new_map);
1363 } else {
1364 /*
1365 * Merge kcore map into existing maps,
1366 * and ensure that current maps (eBPF)
1367 * stay intact.
1368 */
1369 if (maps__merge_in(kmaps, new_map))
1370 goto out_err;
1371 }
1372 }
1373
1374 if (machine__is(machine, "x86_64")) {
1375 u64 addr;
1376
1377 /*
1378 * If one of the corresponding symbols is there, assume the
1379 * entry trampoline maps are too.
1380 */
1381 if (!kallsyms__get_function_start(kallsyms_filename,
1382 ENTRY_TRAMPOLINE_NAME,
1383 &addr))
1384 machine->trampolines_mapped = true;
1385 }
1386
1387 /*
1388 * Set the data type and long name so that kcore can be read via
1389 * dso__data_read_addr().
1390 */
1391 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1392 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1393 else
1394 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1395 dso__set_long_name(dso, strdup(kcore_filename), true);
1396
1397 close(fd);
1398
1399 if (map->prot & PROT_EXEC)
1400 pr_debug("Using %s for kernel object code\n", kcore_filename);
1401 else
1402 pr_debug("Using %s for kernel data\n", kcore_filename);
1403
1404 return 0;
1405
1406out_err:
1407 while (!list_empty(&md.maps)) {
1408 map = list_entry(md.maps.next, struct map, node);
1409 list_del_init(&map->node);
1410 map__put(map);
1411 }
1412 close(fd);
1413 return -EINVAL;
1414}
1415
1416/*
1417 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1418 * delta based on the relocation reference symbol.
1419 */
1420static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1421{
1422 u64 addr;
1423
1424 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1425 return 0;
1426
1427 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1428 return -1;
1429
1430 *delta = addr - kmap->ref_reloc_sym->addr;
1431 return 0;
1432}
1433
1434int __dso__load_kallsyms(struct dso *dso, const char *filename,
1435 struct map *map, bool no_kcore)
1436{
1437 struct kmap *kmap = map__kmap(map);
1438 u64 delta = 0;
1439
1440 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1441 return -1;
1442
1443 if (!kmap || !kmap->kmaps)
1444 return -1;
1445
1446 if (dso__load_all_kallsyms(dso, filename) < 0)
1447 return -1;
1448
1449 if (kallsyms__delta(kmap, filename, &delta))
1450 return -1;
1451
1452 symbols__fixup_end(&dso->symbols);
1453 symbols__fixup_duplicate(&dso->symbols);
1454
1455 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1456 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1457 else
1458 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1459
1460 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1461 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
1462 else
1463 return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
1464}
1465
1466int dso__load_kallsyms(struct dso *dso, const char *filename,
1467 struct map *map)
1468{
1469 return __dso__load_kallsyms(dso, filename, map, false);
1470}
1471
1472static int dso__load_perf_map(const char *map_path, struct dso *dso)
1473{
1474 char *line = NULL;
1475 size_t n;
1476 FILE *file;
1477 int nr_syms = 0;
1478
1479 file = fopen(map_path, "r");
1480 if (file == NULL)
1481 goto out_failure;
1482
1483 while (!feof(file)) {
1484 u64 start, size;
1485 struct symbol *sym;
1486 int line_len, len;
1487
1488 line_len = getline(&line, &n, file);
1489 if (line_len < 0)
1490 break;
1491
1492 if (!line)
1493 goto out_failure;
1494
1495 line[--line_len] = '\0'; /* \n */
1496
1497 len = hex2u64(line, &start);
1498
1499 len++;
1500 if (len + 2 >= line_len)
1501 continue;
1502
1503 len += hex2u64(line + len, &size);
1504
1505 len++;
1506 if (len + 2 >= line_len)
1507 continue;
1508
1509 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1510
1511 if (sym == NULL)
1512 goto out_delete_line;
1513
1514 symbols__insert(&dso->symbols, sym);
1515 nr_syms++;
1516 }
1517
1518 free(line);
1519 fclose(file);
1520
1521 return nr_syms;
1522
1523out_delete_line:
1524 free(line);
1525out_failure:
1526 return -1;
1527}
1528
1529static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1530 enum dso_binary_type type)
1531{
1532 switch (type) {
1533 case DSO_BINARY_TYPE__JAVA_JIT:
1534 case DSO_BINARY_TYPE__DEBUGLINK:
1535 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1536 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1537 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1538 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1539 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1540 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1541 return !kmod && dso->kernel == DSO_SPACE__USER;
1542
1543 case DSO_BINARY_TYPE__KALLSYMS:
1544 case DSO_BINARY_TYPE__VMLINUX:
1545 case DSO_BINARY_TYPE__KCORE:
1546 return dso->kernel == DSO_SPACE__KERNEL;
1547
1548 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1549 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1550 case DSO_BINARY_TYPE__GUEST_KCORE:
1551 return dso->kernel == DSO_SPACE__KERNEL_GUEST;
1552
1553 case DSO_BINARY_TYPE__GUEST_KMODULE:
1554 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1555 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1556 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1557 /*
1558 * kernel modules know their symtab type - it's set when
1559 * creating a module dso in machine__addnew_module_map().
1560 */
1561 return kmod && dso->symtab_type == type;
1562
1563 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1564 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1565 return true;
1566
1567 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1568 case DSO_BINARY_TYPE__BPF_IMAGE:
1569 case DSO_BINARY_TYPE__OOL:
1570 case DSO_BINARY_TYPE__NOT_FOUND:
1571 default:
1572 return false;
1573 }
1574}
1575
1576/* Checks for the existence of the perf-<pid>.map file in two different
1577 * locations. First, if the process is a separate mount namespace, check in
1578 * that namespace using the pid of the innermost pid namespace. If's not in a
1579 * namespace, or the file can't be found there, try in the mount namespace of
1580 * the tracing process using our view of its pid.
1581 */
1582static int dso__find_perf_map(char *filebuf, size_t bufsz,
1583 struct nsinfo **nsip)
1584{
1585 struct nscookie nsc;
1586 struct nsinfo *nsi;
1587 struct nsinfo *nnsi;
1588 int rc = -1;
1589
1590 nsi = *nsip;
1591
1592 if (nsi->need_setns) {
1593 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1594 nsinfo__mountns_enter(nsi, &nsc);
1595 rc = access(filebuf, R_OK);
1596 nsinfo__mountns_exit(&nsc);
1597 if (rc == 0)
1598 return rc;
1599 }
1600
1601 nnsi = nsinfo__copy(nsi);
1602 if (nnsi) {
1603 nsinfo__put(nsi);
1604
1605 nnsi->need_setns = false;
1606 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1607 *nsip = nnsi;
1608 rc = 0;
1609 }
1610
1611 return rc;
1612}
1613
1614int dso__load(struct dso *dso, struct map *map)
1615{
1616 char *name;
1617 int ret = -1;
1618 u_int i;
1619 struct machine *machine = NULL;
1620 char *root_dir = (char *) "";
1621 int ss_pos = 0;
1622 struct symsrc ss_[2];
1623 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1624 bool kmod;
1625 bool perfmap;
1626 unsigned char build_id[BUILD_ID_SIZE];
1627 struct nscookie nsc;
1628 char newmapname[PATH_MAX];
1629 const char *map_path = dso->long_name;
1630
1631 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1632 if (perfmap) {
1633 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1634 sizeof(newmapname), &dso->nsinfo) == 0)) {
1635 map_path = newmapname;
1636 }
1637 }
1638
1639 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1640 pthread_mutex_lock(&dso->lock);
1641
1642 /* check again under the dso->lock */
1643 if (dso__loaded(dso)) {
1644 ret = 1;
1645 goto out;
1646 }
1647
1648 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1649 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1650 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1651 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1652
1653 if (dso->kernel && !kmod) {
1654 if (dso->kernel == DSO_SPACE__KERNEL)
1655 ret = dso__load_kernel_sym(dso, map);
1656 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1657 ret = dso__load_guest_kernel_sym(dso, map);
1658
1659 machine = map__kmaps(map)->machine;
1660 if (machine__is(machine, "x86_64"))
1661 machine__map_x86_64_entry_trampolines(machine, dso);
1662 goto out;
1663 }
1664
1665 dso->adjust_symbols = 0;
1666
1667 if (perfmap) {
1668 ret = dso__load_perf_map(map_path, dso);
1669 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1670 DSO_BINARY_TYPE__NOT_FOUND;
1671 goto out;
1672 }
1673
1674 if (machine)
1675 root_dir = machine->root_dir;
1676
1677 name = malloc(PATH_MAX);
1678 if (!name)
1679 goto out;
1680
1681 /*
1682 * Read the build id if possible. This is required for
1683 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1684 */
1685 if (!dso->has_build_id &&
1686 is_regular_file(dso->long_name)) {
1687 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1688 if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1689 dso__set_build_id(dso, build_id);
1690 }
1691
1692 /*
1693 * Iterate over candidate debug images.
1694 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1695 * and/or opd section) for processing.
1696 */
1697 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1698 struct symsrc *ss = &ss_[ss_pos];
1699 bool next_slot = false;
1700 bool is_reg;
1701 bool nsexit;
1702 int sirc = -1;
1703
1704 enum dso_binary_type symtab_type = binary_type_symtab[i];
1705
1706 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1707 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1708
1709 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1710 continue;
1711
1712 if (dso__read_binary_type_filename(dso, symtab_type,
1713 root_dir, name, PATH_MAX))
1714 continue;
1715
1716 if (nsexit)
1717 nsinfo__mountns_exit(&nsc);
1718
1719 is_reg = is_regular_file(name);
1720 if (is_reg)
1721 sirc = symsrc__init(ss, dso, name, symtab_type);
1722
1723 if (nsexit)
1724 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1725
1726 if (!is_reg || sirc < 0)
1727 continue;
1728
1729 if (!syms_ss && symsrc__has_symtab(ss)) {
1730 syms_ss = ss;
1731 next_slot = true;
1732 if (!dso->symsrc_filename)
1733 dso->symsrc_filename = strdup(name);
1734 }
1735
1736 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1737 runtime_ss = ss;
1738 next_slot = true;
1739 }
1740
1741 if (next_slot) {
1742 ss_pos++;
1743
1744 if (syms_ss && runtime_ss)
1745 break;
1746 } else {
1747 symsrc__destroy(ss);
1748 }
1749
1750 }
1751
1752 if (!runtime_ss && !syms_ss)
1753 goto out_free;
1754
1755 if (runtime_ss && !syms_ss) {
1756 syms_ss = runtime_ss;
1757 }
1758
1759 /* We'll have to hope for the best */
1760 if (!runtime_ss && syms_ss)
1761 runtime_ss = syms_ss;
1762
1763 if (syms_ss)
1764 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1765 else
1766 ret = -1;
1767
1768 if (ret > 0) {
1769 int nr_plt;
1770
1771 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1772 if (nr_plt > 0)
1773 ret += nr_plt;
1774 }
1775
1776 for (; ss_pos > 0; ss_pos--)
1777 symsrc__destroy(&ss_[ss_pos - 1]);
1778out_free:
1779 free(name);
1780 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1781 ret = 0;
1782out:
1783 dso__set_loaded(dso);
1784 pthread_mutex_unlock(&dso->lock);
1785 nsinfo__mountns_exit(&nsc);
1786
1787 return ret;
1788}
1789
1790static int map__strcmp(const void *a, const void *b)
1791{
1792 const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
1793 return strcmp(ma->dso->short_name, mb->dso->short_name);
1794}
1795
1796static int map__strcmp_name(const void *name, const void *b)
1797{
1798 const struct map *map = *(const struct map **)b;
1799 return strcmp(name, map->dso->short_name);
1800}
1801
1802void __maps__sort_by_name(struct maps *maps)
1803{
1804 qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
1805}
1806
1807static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
1808{
1809 struct map *map;
1810 struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map));
1811 int i = 0;
1812
1813 if (maps_by_name == NULL)
1814 return -1;
1815
1816 maps->maps_by_name = maps_by_name;
1817 maps->nr_maps_allocated = maps->nr_maps;
1818
1819 maps__for_each_entry(maps, map)
1820 maps_by_name[i++] = map;
1821
1822 __maps__sort_by_name(maps);
1823 return 0;
1824}
1825
1826static struct map *__maps__find_by_name(struct maps *maps, const char *name)
1827{
1828 struct map **mapp;
1829
1830 if (maps->maps_by_name == NULL &&
1831 map__groups__sort_by_name_from_rbtree(maps))
1832 return NULL;
1833
1834 mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
1835 if (mapp)
1836 return *mapp;
1837 return NULL;
1838}
1839
1840struct map *maps__find_by_name(struct maps *maps, const char *name)
1841{
1842 struct map *map;
1843
1844 down_read(&maps->lock);
1845
1846 if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
1847 map = maps->last_search_by_name;
1848 goto out_unlock;
1849 }
1850 /*
1851 * If we have maps->maps_by_name, then the name isn't in the rbtree,
1852 * as maps->maps_by_name mirrors the rbtree when lookups by name are
1853 * made.
1854 */
1855 map = __maps__find_by_name(maps, name);
1856 if (map || maps->maps_by_name != NULL)
1857 goto out_unlock;
1858
1859 /* Fallback to traversing the rbtree... */
1860 maps__for_each_entry(maps, map)
1861 if (strcmp(map->dso->short_name, name) == 0) {
1862 maps->last_search_by_name = map;
1863 goto out_unlock;
1864 }
1865
1866 map = NULL;
1867
1868out_unlock:
1869 up_read(&maps->lock);
1870 return map;
1871}
1872
1873int dso__load_vmlinux(struct dso *dso, struct map *map,
1874 const char *vmlinux, bool vmlinux_allocated)
1875{
1876 int err = -1;
1877 struct symsrc ss;
1878 char symfs_vmlinux[PATH_MAX];
1879 enum dso_binary_type symtab_type;
1880
1881 if (vmlinux[0] == '/')
1882 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1883 else
1884 symbol__join_symfs(symfs_vmlinux, vmlinux);
1885
1886 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1887 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1888 else
1889 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1890
1891 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1892 return -1;
1893
1894 err = dso__load_sym(dso, map, &ss, &ss, 0);
1895 symsrc__destroy(&ss);
1896
1897 if (err > 0) {
1898 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1899 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1900 else
1901 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1902 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1903 dso__set_loaded(dso);
1904 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1905 }
1906
1907 return err;
1908}
1909
1910int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1911{
1912 int i, err = 0;
1913 char *filename = NULL;
1914
1915 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1916 vmlinux_path__nr_entries + 1);
1917
1918 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1919 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1920 if (err > 0)
1921 goto out;
1922 }
1923
1924 if (!symbol_conf.ignore_vmlinux_buildid)
1925 filename = dso__build_id_filename(dso, NULL, 0, false);
1926 if (filename != NULL) {
1927 err = dso__load_vmlinux(dso, map, filename, true);
1928 if (err > 0)
1929 goto out;
1930 free(filename);
1931 }
1932out:
1933 return err;
1934}
1935
1936static bool visible_dir_filter(const char *name, struct dirent *d)
1937{
1938 if (d->d_type != DT_DIR)
1939 return false;
1940 return lsdir_no_dot_filter(name, d);
1941}
1942
1943static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1944{
1945 char kallsyms_filename[PATH_MAX];
1946 int ret = -1;
1947 struct strlist *dirs;
1948 struct str_node *nd;
1949
1950 dirs = lsdir(dir, visible_dir_filter);
1951 if (!dirs)
1952 return -1;
1953
1954 strlist__for_each_entry(nd, dirs) {
1955 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1956 "%s/%s/kallsyms", dir, nd->s);
1957 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1958 strlcpy(dir, kallsyms_filename, dir_sz);
1959 ret = 0;
1960 break;
1961 }
1962 }
1963
1964 strlist__delete(dirs);
1965
1966 return ret;
1967}
1968
1969/*
1970 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1971 * since access(R_OK) only checks with real UID/GID but open() use effective
1972 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1973 */
1974static bool filename__readable(const char *file)
1975{
1976 int fd = open(file, O_RDONLY);
1977 if (fd < 0)
1978 return false;
1979 close(fd);
1980 return true;
1981}
1982
1983static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1984{
1985 u8 host_build_id[BUILD_ID_SIZE];
1986 char sbuild_id[SBUILD_ID_SIZE];
1987 bool is_host = false;
1988 char path[PATH_MAX];
1989
1990 if (!dso->has_build_id) {
1991 /*
1992 * Last resort, if we don't have a build-id and couldn't find
1993 * any vmlinux file, try the running kernel kallsyms table.
1994 */
1995 goto proc_kallsyms;
1996 }
1997
1998 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1999 sizeof(host_build_id)) == 0)
2000 is_host = dso__build_id_equal(dso, host_build_id);
2001
2002 /* Try a fast path for /proc/kallsyms if possible */
2003 if (is_host) {
2004 /*
2005 * Do not check the build-id cache, unless we know we cannot use
2006 * /proc/kcore or module maps don't match to /proc/kallsyms.
2007 * To check readability of /proc/kcore, do not use access(R_OK)
2008 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
2009 * can't check it.
2010 */
2011 if (filename__readable("/proc/kcore") &&
2012 !validate_kcore_addresses("/proc/kallsyms", map))
2013 goto proc_kallsyms;
2014 }
2015
2016 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
2017
2018 /* Find kallsyms in build-id cache with kcore */
2019 scnprintf(path, sizeof(path), "%s/%s/%s",
2020 buildid_dir, DSO__NAME_KCORE, sbuild_id);
2021
2022 if (!find_matching_kcore(map, path, sizeof(path)))
2023 return strdup(path);
2024
2025 /* Use current /proc/kallsyms if possible */
2026 if (is_host) {
2027proc_kallsyms:
2028 return strdup("/proc/kallsyms");
2029 }
2030
2031 /* Finally, find a cache of kallsyms */
2032 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
2033 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
2034 sbuild_id);
2035 return NULL;
2036 }
2037
2038 return strdup(path);
2039}
2040
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, when analysing an ARM perf.data file _without_ a
	 * build-id, or when the user specifies the wrong path to the right
	 * vmlinux file, we obviously can't fall back to another vmlinux (an
	 * x86_64 one on the machine where the analysis is being performed,
	 * say), or worse, to /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL)
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* Do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

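/*
 * Guest kernels: for the catch-all "default guest" machine the user must
 * supply symbol sources explicitly (in perf kvm these correspond, as far
 * as I can tell, to the --guestvmlinux and --guestkallsyms options); for a
 * specific guest, kallsyms is read from <root_dir>/proc/kallsyms, where
 * root_dir is typically a --guestmount-style mount of the guest's root.
 */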
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine = map__kmaps(map)->machine;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest kallsyms file the user supplied on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		/* bound the write to avoid overflowing path */
		scnprintf(path, sizeof(path), "%s/proc/kallsyms",
			  machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}

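/*
 * Worked example: with a kernel release string of "6.1.0" (a made-up
 * value), the %s templates above expand to candidates such as
 * "/boot/vmlinux-6.1.0", "/usr/lib/debug/boot/vmlinux-6.1.0",
 * "/lib/modules/6.1.0/build/vmlinux" and so on, appended after the two
 * fixed entries "vmlinux" and "/boot/vmlinux".
 */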
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* Only try the kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

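/*
 * Typical use elsewhere in perf (values illustrative): the string comes
 * from an option such as --dsos and is a comma-separated list, e.g.
 *
 *	setup_list(&symbol_conf.dso_list, "libc.so.6,[kernel.kallsyms]", "dso");
 *
 * On success the parsed strlist is stored through *list and the global
 * has_filter flag is raised so later code knows filtering is in effect.
 */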
int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

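/*
 * Decision table implemented below (sysctl semantics paraphrased from the
 * kernel documentation): kptr_restrict == 0 does not hide kernel addresses,
 * == 1 hides them from users lacking CAP_SYSLOG, == 2 hides them from
 * everyone. So with CAP_SYSLOG only a value >= 2 restricts us; without it
 * any non-zero value does, and a high perf_event_paranoid setting restricts
 * us regardless of the sysctl.
 */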
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = perf_cap__capable(CAP_SYSLOG) ?
				(atoi(line) >= 2) :
				(atoi(line) != 0);

		fclose(fp);
	}

	/*
	 * Per kernel/kallsyms.c: we are also restricted when
	 * perf_event_paranoid > 1 without CAP_SYSLOG.
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be initialized before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

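/*
 * A minimal usage sketch of the expected call order (error handling
 * elided):
 *
 *	symbol__annotation_init();	// must run first: grows priv_size
 *	symbol__init(NULL);		// or pass a perf_env read from a header
 *
 * Reversing the calls trips the check above, because priv_size must be
 * final before the first symbol is allocated.
 */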
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non-valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A symfs path of "/" is identical to "", so reset it here for
	 * simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/*
	 * Skip the locally configured cache if a symfs is given, and point
	 * the build-id dir at symfs/.debug instead.
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

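/*
 * The three functions below implement the usual refcounted-object pattern:
 * mem_info__new() hands back an object with refcnt == 1, every additional
 * holder takes a reference with mem_info__get(), and the final
 * mem_info__put() frees it. Illustrative (hypothetical) usage:
 *
 *	struct mem_info *mi = mem_info__new();
 *	struct mem_info *shared = mem_info__get(mi);	// second holder
 *
 *	mem_info__put(mi);	// refcnt 2 -> 1, still alive
 *	mem_info__put(shared);	// refcnt 1 -> 0, freed
 */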
struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (mi)
		refcount_inc(&mi->refcnt);
	return mi;
}

void mem_info__put(struct mem_info *mi)
{
	if (mi && refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi)
		refcount_set(&mi->refcnt, 1);
	return mi;
}