1// SPDX-License-Identifier: GPL-2.0
2#include <asm/bug.h>
3#include <linux/kernel.h>
4#include <linux/string.h>
5#include <linux/zalloc.h>
6#include <sys/time.h>
7#include <sys/resource.h>
8#include <sys/types.h>
9#include <sys/stat.h>
10#include <unistd.h>
11#include <errno.h>
12#include <fcntl.h>
13#include <stdlib.h>
14#include <bpf/libbpf.h>
15#include "bpf-event.h"
16#include "compress.h"
17#include "env.h"
18#include "namespaces.h"
19#include "path.h"
20#include "map.h"
21#include "symbol.h"
22#include "srcline.h"
23#include "dso.h"
24#include "dsos.h"
25#include "machine.h"
26#include "auxtrace.h"
27#include "util.h" /* O_CLOEXEC for older systems */
28#include "debug.h"
29#include "string2.h"
30#include "vdso.h"
31
32static const char * const debuglink_paths[] = {
33 "%.0s%s",
34 "%s/%s",
35 "%s/.debug/%s",
36 "/usr/lib/debug%s/%s"
37};
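/*
 * With dso_dir "/usr/bin" and symfile "ls.debug" (illustrative values),
 * the formats above expand to:
 *
 *	ls.debug
 *	/usr/bin/ls.debug
 *	/usr/bin/.debug/ls.debug
 *	/usr/lib/debug/usr/bin/ls.debug
 */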
38
39char dso__symtab_origin(const struct dso *dso)
40{
41 static const char origin[] = {
42 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
43 [DSO_BINARY_TYPE__VMLINUX] = 'v',
44 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
45 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
46 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
47 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
48 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
49 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
50 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
51 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
52 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
53 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
54 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
55 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
56 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
57 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
58 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
59 };
60
61 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
62 return '!';
63 return origin[dso->symtab_type];
64}
65
66int dso__read_binary_type_filename(const struct dso *dso,
67 enum dso_binary_type type,
68 char *root_dir, char *filename, size_t size)
69{
70 char build_id_hex[SBUILD_ID_SIZE];
71 int ret = 0;
72 size_t len;
73
74 switch (type) {
75 case DSO_BINARY_TYPE__DEBUGLINK:
76 {
77 const char *last_slash;
78 char dso_dir[PATH_MAX];
79 char symfile[PATH_MAX];
80 unsigned int i;
81
82 len = __symbol__join_symfs(filename, size, dso->long_name);
83 last_slash = filename + len;
84 while (last_slash != filename && *last_slash != '/')
85 last_slash--;
86
87 strncpy(dso_dir, filename, last_slash - filename);
88 dso_dir[last_slash-filename] = '\0';
89
90 if (!is_regular_file(filename)) {
91 ret = -1;
92 break;
93 }
94
95 ret = filename__read_debuglink(filename, symfile, PATH_MAX);
96 if (ret)
97 break;
98
99 /* Check predefined locations where debug file might reside */
100 ret = -1;
101 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
102 snprintf(filename, size,
103 debuglink_paths[i], dso_dir, symfile);
104 if (is_regular_file(filename)) {
105 ret = 0;
106 break;
107 }
108 }
109
110 break;
111 }
112 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
113 if (dso__build_id_filename(dso, filename, size, false) == NULL)
114 ret = -1;
115 break;
116
117 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
118 if (dso__build_id_filename(dso, filename, size, true) == NULL)
119 ret = -1;
120 break;
121
122 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
123 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
124 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
125 break;
126
127 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
128 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
129 snprintf(filename + len, size - len, "%s", dso->long_name);
130 break;
131
132 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
133 {
134 const char *last_slash;
135 size_t dir_size;
136
137 last_slash = dso->long_name + dso->long_name_len;
138 while (last_slash != dso->long_name && *last_slash != '/')
139 last_slash--;
140
141 len = __symbol__join_symfs(filename, size, "");
142 dir_size = last_slash - dso->long_name + 2;
143 if (dir_size > (size - len)) {
144 ret = -1;
145 break;
146 }
147 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
148 len += scnprintf(filename + len , size - len, ".debug%s",
149 last_slash);
150 break;
151 }
152
153 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
154 if (!dso->has_build_id) {
155 ret = -1;
156 break;
157 }
158
159 build_id__sprintf(dso->build_id,
160 sizeof(dso->build_id),
161 build_id_hex);
162 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
163 snprintf(filename + len, size - len, "%.2s/%s.debug",
164 build_id_hex, build_id_hex + 2);
165 break;
166
167 case DSO_BINARY_TYPE__VMLINUX:
168 case DSO_BINARY_TYPE__GUEST_VMLINUX:
169 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
170 __symbol__join_symfs(filename, size, dso->long_name);
171 break;
172
173 case DSO_BINARY_TYPE__GUEST_KMODULE:
174 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
175 path__join3(filename, size, symbol_conf.symfs,
176 root_dir, dso->long_name);
177 break;
178
179 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
180 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
181 __symbol__join_symfs(filename, size, dso->long_name);
182 break;
183
184 case DSO_BINARY_TYPE__KCORE:
185 case DSO_BINARY_TYPE__GUEST_KCORE:
186 snprintf(filename, size, "%s", dso->long_name);
187 break;
188
189 default:
190 case DSO_BINARY_TYPE__KALLSYMS:
191 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
192 case DSO_BINARY_TYPE__JAVA_JIT:
193 case DSO_BINARY_TYPE__BPF_PROG_INFO:
194 case DSO_BINARY_TYPE__NOT_FOUND:
195 ret = -1;
196 break;
197 }
198
199 return ret;
200}
201
202enum {
203 COMP_ID__NONE = 0,
204};
205
206static const struct {
207 const char *fmt;
208 int (*decompress)(const char *input, int output);
209 bool (*is_compressed)(const char *input);
210} compressions[] = {
211 [COMP_ID__NONE] = { .fmt = NULL, },
212#ifdef HAVE_ZLIB_SUPPORT
213 { "gz", gzip_decompress_to_file, gzip_is_compressed },
214#endif
215#ifdef HAVE_LZMA_SUPPORT
216 { "xz", lzma_decompress_to_file, lzma_is_compressed },
217#endif
218 { NULL, NULL, NULL },
219};
220
221static int is_supported_compression(const char *ext)
222{
223 unsigned i;
224
225 for (i = 1; compressions[i].fmt; i++) {
226 if (!strcmp(ext, compressions[i].fmt))
227 return i;
228 }
229 return COMP_ID__NONE;
230}
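/*
 * For example, is_supported_compression("xz") returns the index of the
 * "xz" entry above when lzma support is built in; unknown or unsupported
 * extensions yield COMP_ID__NONE.
 */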
231
232bool is_kernel_module(const char *pathname, int cpumode)
233{
234 struct kmod_path m;
235 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
236
237 WARN_ONCE(mode != cpumode,
238 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
239 cpumode);
240
241 switch (mode) {
242 case PERF_RECORD_MISC_USER:
243 case PERF_RECORD_MISC_HYPERVISOR:
244 case PERF_RECORD_MISC_GUEST_USER:
245 return false;
246 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
247 default:
248 if (kmod_path__parse(&m, pathname)) {
249 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
250 pathname);
251 return true;
252 }
253 }
254
255 return m.kmod;
256}
257
258bool dso__needs_decompress(struct dso *dso)
259{
260 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
261 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
262}
263
264static int decompress_kmodule(struct dso *dso, const char *name,
265 char *pathname, size_t len)
266{
267 char tmpbuf[] = KMOD_DECOMP_NAME;
268 int fd = -1;
269
270 if (!dso__needs_decompress(dso))
271 return -1;
272
273 if (dso->comp == COMP_ID__NONE)
274 return -1;
275
276 /*
277	 * We have a proper compression id for the DSO and yet the file
278	 * behind 'name' can still be a plain uncompressed object.
279	 *
280	 * The reason lies in the way we open DSO object files: we try
281	 * all possible 'debug' objects until we find one with the data.
282	 * So even if the DSO is represented by the 'krava.xz' module,
283	 * we can end up here opening a '~/.debug/....23432432/debug' file
284	 * which is not compressed.
285 *
286 * To keep this transparent, we detect this and return the file
287 * descriptor to the uncompressed file.
288 */
289 if (!compressions[dso->comp].is_compressed(name))
290 return open(name, O_RDONLY);
291
292 fd = mkstemp(tmpbuf);
293 if (fd < 0) {
294 dso->load_errno = errno;
295 return -1;
296 }
297
298 if (compressions[dso->comp].decompress(name, fd)) {
299 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
300 close(fd);
301 fd = -1;
302 }
303
304 if (!pathname || (fd < 0))
305 unlink(tmpbuf);
306
307 if (pathname && (fd >= 0))
308 strlcpy(pathname, tmpbuf, len);
309
310 return fd;
311}
312
313int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
314{
315 return decompress_kmodule(dso, name, NULL, 0);
316}
317
318int dso__decompress_kmodule_path(struct dso *dso, const char *name,
319 char *pathname, size_t len)
320{
321 int fd = decompress_kmodule(dso, name, pathname, len);
322
323 close(fd);
324 return fd >= 0 ? 0 : -1;
325}
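/*
 * A minimal usage sketch (hypothetical caller), mirroring what __open_dso()
 * does below:
 *
 *	char newpath[KMOD_DECOMP_LEN];
 *
 *	if (dso__decompress_kmodule_path(dso, name, newpath, sizeof(newpath)) == 0) {
 *		... read symbols or data from newpath ...
 *		unlink(newpath);
 *	}
 */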
326
327/*
328 * Parses kernel module specified in @path and updates
329 * @m argument like:
330 *
331 * @comp - id of the supported compression suffix found in @path,
332 *         COMP_ID__NONE otherwise
333 * @kmod - true if @path contains the '.ko' suffix in the right position,
334 *         false otherwise
335 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
336 *         of the kernel module without suffixes, otherwise strdup-ed
337 *         base name of @path
340 *
341 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
342 */
343int __kmod_path__parse(struct kmod_path *m, const char *path,
344 bool alloc_name)
345{
346 const char *name = strrchr(path, '/');
347 const char *ext = strrchr(path, '.');
348 bool is_simple_name = false;
349
350 memset(m, 0x0, sizeof(*m));
351 name = name ? name + 1 : path;
352
353 /*
354 * '.' is also a valid character for module name. For example:
355 * [aaa.bbb] is a valid module name. '[' should have higher
356 * priority than '.ko' suffix.
357 *
358 * The kernel names are from machine__mmap_name. Such
359 * name should belong to kernel itself, not kernel module.
360 */
361 if (name[0] == '[') {
362 is_simple_name = true;
363 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
364 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
365 (strncmp(name, "[vdso]", 6) == 0) ||
366 (strncmp(name, "[vdso32]", 8) == 0) ||
367 (strncmp(name, "[vdsox32]", 9) == 0) ||
368 (strncmp(name, "[vsyscall]", 10) == 0)) {
369 m->kmod = false;
370
371 } else
372 m->kmod = true;
373 }
374
375 /* No extension, just return name. */
376 if ((ext == NULL) || is_simple_name) {
377 if (alloc_name) {
378 m->name = strdup(name);
379 return m->name ? 0 : -ENOMEM;
380 }
381 return 0;
382 }
383
384 m->comp = is_supported_compression(ext + 1);
385 if (m->comp > COMP_ID__NONE)
386 ext -= 3;
387
388 /* Check .ko extension only if there's enough name left. */
389 if (ext > name)
390 m->kmod = !strncmp(ext, ".ko", 3);
391
392 if (alloc_name) {
393 if (m->kmod) {
394 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
395 return -ENOMEM;
396 } else {
397 if (asprintf(&m->name, "%s", name) == -1)
398 return -ENOMEM;
399 }
400
401 strreplace(m->name, '-', '_');
402 }
403
404 return 0;
405}
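/*
 * Illustrative example, assuming xz decompression support is built in:
 * parsing "/lib/modules/5.4.0/kernel/fs/xfs/xfs.ko.xz" with alloc_name
 * set yields:
 *
 *	m->kmod = true
 *	m->comp = compression id of the "xz" entry
 *	m->name = "[xfs]"
 */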
406
407void dso__set_module_info(struct dso *dso, struct kmod_path *m,
408 struct machine *machine)
409{
410 if (machine__is_host(machine))
411 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
412 else
413 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
414
415 /* _KMODULE_COMP should be next to _KMODULE */
416 if (m->kmod && m->comp) {
417 dso->symtab_type++;
418 dso->comp = m->comp;
419 }
420
421 dso__set_short_name(dso, strdup(m->name), true);
422}
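/*
 * For example (illustrative), a host module parsed as m->name == "[xfs]"
 * with a compression id set ends up with symtab_type
 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP and dso->comp == m->comp.
 */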
423
424/*
425 * Global list of open DSOs and the counter.
426 */
427static LIST_HEAD(dso__data_open);
428static long dso__data_open_cnt;
429static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
430
431static void dso__list_add(struct dso *dso)
432{
433 list_add_tail(&dso->data.open_entry, &dso__data_open);
434 dso__data_open_cnt++;
435}
436
437static void dso__list_del(struct dso *dso)
438{
439 list_del_init(&dso->data.open_entry);
440 WARN_ONCE(dso__data_open_cnt <= 0,
441 "DSO data fd counter out of bounds.");
442 dso__data_open_cnt--;
443}
444
445static void close_first_dso(void);
446
447static int do_open(char *name)
448{
449 int fd;
450 char sbuf[STRERR_BUFSIZE];
451
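	/*
	 * If open() fails with EMFILE while we still hold cached dso fds,
	 * close the oldest one and retry; give up on any other error.
	 */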
452 do {
453 fd = open(name, O_RDONLY|O_CLOEXEC);
454 if (fd >= 0)
455 return fd;
456
457 pr_debug("dso open failed: %s\n",
458 str_error_r(errno, sbuf, sizeof(sbuf)));
459 if (!dso__data_open_cnt || errno != EMFILE)
460 break;
461
462 close_first_dso();
463 } while (1);
464
465 return -1;
466}
467
468static int __open_dso(struct dso *dso, struct machine *machine)
469{
470 int fd = -EINVAL;
471 char *root_dir = (char *)"";
472 char *name = malloc(PATH_MAX);
473 bool decomp = false;
474
475 if (!name)
476 return -ENOMEM;
477
478 if (machine)
479 root_dir = machine->root_dir;
480
481 if (dso__read_binary_type_filename(dso, dso->binary_type,
482 root_dir, name, PATH_MAX))
483 goto out;
484
485 if (!is_regular_file(name))
486 goto out;
487
488 if (dso__needs_decompress(dso)) {
489 char newpath[KMOD_DECOMP_LEN];
490 size_t len = sizeof(newpath);
491
492 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
493 fd = -dso->load_errno;
494 goto out;
495 }
496
497 decomp = true;
498 strcpy(name, newpath);
499 }
500
501 fd = do_open(name);
502
503 if (decomp)
504 unlink(name);
505
506out:
507 free(name);
508 return fd;
509}
510
511static void check_data_close(void);
512
513/**
514 * open_dso - Open DSO data file
515 * @dso: dso object
516 *
517 * Opens @dso's data file descriptor and updates the
518 * list/count of open DSO objects.
519 */
520static int open_dso(struct dso *dso, struct machine *machine)
521{
522 int fd;
523 struct nscookie nsc;
524
525 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
526 nsinfo__mountns_enter(dso->nsinfo, &nsc);
527 fd = __open_dso(dso, machine);
528 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
529 nsinfo__mountns_exit(&nsc);
530
531 if (fd >= 0) {
532 dso__list_add(dso);
533 /*
534 * Check if we crossed the allowed number
535 * of opened DSOs and close one if needed.
536 */
537 check_data_close();
538 }
539
540 return fd;
541}
542
543static void close_data_fd(struct dso *dso)
544{
545 if (dso->data.fd >= 0) {
546 close(dso->data.fd);
547 dso->data.fd = -1;
548 dso->data.file_size = 0;
549 dso__list_del(dso);
550 }
551}
552
553/**
554 * close_dso - Close DSO data file
555 * @dso: dso object
556 *
557 * Closes @dso's data file descriptor and updates the
558 * list/count of open DSO objects.
559 */
560static void close_dso(struct dso *dso)
561{
562 close_data_fd(dso);
563}
564
565static void close_first_dso(void)
566{
567 struct dso *dso;
568
569 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
570 close_dso(dso);
571}
572
573static rlim_t get_fd_limit(void)
574{
575 struct rlimit l;
576 rlim_t limit = 0;
577
578 /* Allow half of the current open fd limit. */
579 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
580 if (l.rlim_cur == RLIM_INFINITY)
581 limit = l.rlim_cur;
582 else
583 limit = l.rlim_cur / 2;
584 } else {
585 pr_err("failed to get fd limit\n");
586 limit = 1;
587 }
588
589 return limit;
590}
591
592static rlim_t fd_limit;
593
594/*
595 * Used only by tests/dso-data.c to reset the environment
596 * for tests. It is not expected to change during a
597 * standard run.
598 */
599void reset_fd_limit(void)
600{
601 fd_limit = 0;
602}
603
604static bool may_cache_fd(void)
605{
606 if (!fd_limit)
607 fd_limit = get_fd_limit();
608
609 if (fd_limit == RLIM_INFINITY)
610 return true;
611
612 return fd_limit > (rlim_t) dso__data_open_cnt;
613}
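/*
 * For example, with the common RLIMIT_NOFILE soft limit of 1024, roughly
 * 512 dso file descriptors are kept cached before check_data_close()
 * starts closing the oldest ones.
 */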
614
615/*
616 * Check and close the LRU dso if we crossed the allowed limit
617 * for opened dso file descriptors. The limit is half
618 * of the RLIMIT_NOFILE files opened.
619 */
620static void check_data_close(void)
621{
622 bool cache_fd = may_cache_fd();
623
624 if (!cache_fd)
625 close_first_dso();
626}
627
628/**
629 * dso__data_close - Close DSO data file
630 * @dso: dso object
631 *
632 * External interface to close @dso's data file descriptor.
633 */
634void dso__data_close(struct dso *dso)
635{
636 pthread_mutex_lock(&dso__data_open_lock);
637 close_dso(dso);
638 pthread_mutex_unlock(&dso__data_open_lock);
639}
640
641static void try_to_open_dso(struct dso *dso, struct machine *machine)
642{
643 enum dso_binary_type binary_type_data[] = {
644 DSO_BINARY_TYPE__BUILD_ID_CACHE,
645 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
646 DSO_BINARY_TYPE__NOT_FOUND,
647 };
648 int i = 0;
649
650 if (dso->data.fd >= 0)
651 return;
652
653 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
654 dso->data.fd = open_dso(dso, machine);
655 goto out;
656 }
657
658 do {
659 dso->binary_type = binary_type_data[i++];
660
661 dso->data.fd = open_dso(dso, machine);
662 if (dso->data.fd >= 0)
663 goto out;
664
665 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
666out:
667 if (dso->data.fd >= 0)
668 dso->data.status = DSO_DATA_STATUS_OK;
669 else
670 dso->data.status = DSO_DATA_STATUS_ERROR;
671}
672
673/**
674 * dso__data_get_fd - Get dso's data file descriptor
675 * @dso: dso object
676 * @machine: machine object
677 *
678 * External interface to find dso's file, open it and
679 * return its file descriptor. It should be paired with
680 * dso__data_put_fd() if it returns a non-negative value.
681 */
682int dso__data_get_fd(struct dso *dso, struct machine *machine)
683{
684 if (dso->data.status == DSO_DATA_STATUS_ERROR)
685 return -1;
686
687 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
688 return -1;
689
690 try_to_open_dso(dso, machine);
691
692 if (dso->data.fd < 0)
693 pthread_mutex_unlock(&dso__data_open_lock);
694
695 return dso->data.fd;
696}
697
698void dso__data_put_fd(struct dso *dso __maybe_unused)
699{
700 pthread_mutex_unlock(&dso__data_open_lock);
701}
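/*
 * A minimal usage sketch of the get/put pairing (hypothetical caller):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... read from fd while the open lock is held ...
 *		dso__data_put_fd(dso);
 *	}
 */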
702
703bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
704{
705 u32 flag = 1 << by;
706
707 if (dso->data.status_seen & flag)
708 return true;
709
710 dso->data.status_seen |= flag;
711
712 return false;
713}
714
715static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
716{
717 struct bpf_prog_info_node *node;
718 ssize_t size = DSO__DATA_CACHE_SIZE;
719 u64 len;
720 u8 *buf;
721
722 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
723 if (!node || !node->info_linear) {
724 dso->data.status = DSO_DATA_STATUS_ERROR;
725 return -1;
726 }
727
728 len = node->info_linear->info.jited_prog_len;
729 buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
730
731 if (offset >= len)
732 return -1;
733
734 size = (ssize_t)min(len - offset, (u64)size);
735 memcpy(data, buf + offset, size);
736 return size;
737}
738
739static int bpf_size(struct dso *dso)
740{
741 struct bpf_prog_info_node *node;
742
743 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
744 if (!node || !node->info_linear) {
745 dso->data.status = DSO_DATA_STATUS_ERROR;
746 return -1;
747 }
748
749 dso->data.file_size = node->info_linear->info.jited_prog_len;
750 return 0;
751}
752
753static void
754dso_cache__free(struct dso *dso)
755{
756 struct rb_root *root = &dso->data.cache;
757 struct rb_node *next = rb_first(root);
758
759 pthread_mutex_lock(&dso->lock);
760 while (next) {
761 struct dso_cache *cache;
762
763 cache = rb_entry(next, struct dso_cache, rb_node);
764 next = rb_next(&cache->rb_node);
765 rb_erase(&cache->rb_node, root);
766 free(cache);
767 }
768 pthread_mutex_unlock(&dso->lock);
769}
770
771static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
772{
773 const struct rb_root *root = &dso->data.cache;
774 struct rb_node * const *p = &root->rb_node;
775 const struct rb_node *parent = NULL;
776 struct dso_cache *cache;
777
778 while (*p != NULL) {
779 u64 end;
780
781 parent = *p;
782 cache = rb_entry(parent, struct dso_cache, rb_node);
783 end = cache->offset + DSO__DATA_CACHE_SIZE;
784
785 if (offset < cache->offset)
786 p = &(*p)->rb_left;
787 else if (offset >= end)
788 p = &(*p)->rb_right;
789 else
790 return cache;
791 }
792
793 return NULL;
794}
795
796static struct dso_cache *
797dso_cache__insert(struct dso *dso, struct dso_cache *new)
798{
799 struct rb_root *root = &dso->data.cache;
800 struct rb_node **p = &root->rb_node;
801 struct rb_node *parent = NULL;
802 struct dso_cache *cache;
803 u64 offset = new->offset;
804
805 pthread_mutex_lock(&dso->lock);
806 while (*p != NULL) {
807 u64 end;
808
809 parent = *p;
810 cache = rb_entry(parent, struct dso_cache, rb_node);
811 end = cache->offset + DSO__DATA_CACHE_SIZE;
812
813 if (offset < cache->offset)
814 p = &(*p)->rb_left;
815 else if (offset >= end)
816 p = &(*p)->rb_right;
817 else
818 goto out;
819 }
820
821 rb_link_node(&new->rb_node, parent, p);
822 rb_insert_color(&new->rb_node, root);
823
824 cache = NULL;
825out:
826 pthread_mutex_unlock(&dso->lock);
827 return cache;
828}
829
830static ssize_t
831dso_cache__memcpy(struct dso_cache *cache, u64 offset,
832 u8 *data, u64 size)
833{
834 u64 cache_offset = offset - cache->offset;
835 u64 cache_size = min(cache->size - cache_offset, size);
836
837 memcpy(data, cache->data + cache_offset, cache_size);
838 return cache_size;
839}
840
841static ssize_t file_read(struct dso *dso, struct machine *machine,
842 u64 offset, char *data)
843{
844 ssize_t ret;
845
846 pthread_mutex_lock(&dso__data_open_lock);
847
848 /*
849 * dso->data.fd might be closed if other thread opened another
850 * file (dso) due to open file limit (RLIMIT_NOFILE).
851 */
852 try_to_open_dso(dso, machine);
853
854 if (dso->data.fd < 0) {
855 dso->data.status = DSO_DATA_STATUS_ERROR;
856 ret = -errno;
857 goto out;
858 }
859
860 ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
861out:
862 pthread_mutex_unlock(&dso__data_open_lock);
863 return ret;
864}
865
866static ssize_t
867dso_cache__read(struct dso *dso, struct machine *machine,
868 u64 offset, u8 *data, ssize_t size)
869{
870 u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
871 struct dso_cache *cache;
872 struct dso_cache *old;
873 ssize_t ret;
874
875 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
876 if (!cache)
877 return -ENOMEM;
878
879 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
880 ret = bpf_read(dso, cache_offset, cache->data);
881 else
882 ret = file_read(dso, machine, cache_offset, cache->data);
883
884 if (ret > 0) {
885 cache->offset = cache_offset;
886 cache->size = ret;
887
888 old = dso_cache__insert(dso, cache);
889 if (old) {
890 /* we lose the race */
891 free(cache);
892 cache = old;
893 }
894
895 ret = dso_cache__memcpy(cache, offset, data, size);
896 }
897
898 if (ret <= 0)
899 free(cache);
900
901 return ret;
902}
903
904static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
905 u64 offset, u8 *data, ssize_t size)
906{
907 struct dso_cache *cache;
908
909 cache = dso_cache__find(dso, offset);
910 if (cache)
911 return dso_cache__memcpy(cache, offset, data, size);
912 else
913 return dso_cache__read(dso, machine, offset, data, size);
914}
915
916/*
917 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
918 * kept in the rb_tree. Any read of already cached data is served
919 * from the cache.
920 */
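/*
 * For example (illustrative), a 12-byte read starting 8 bytes before a
 * chunk boundary is served by two iterations of the loop below: the first
 * copies the trailing 8 bytes of one chunk, the second the leading 4 bytes
 * of the next.
 */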
921static ssize_t cached_read(struct dso *dso, struct machine *machine,
922 u64 offset, u8 *data, ssize_t size)
923{
924 ssize_t r = 0;
925 u8 *p = data;
926
927 do {
928 ssize_t ret;
929
930 ret = dso_cache_read(dso, machine, offset, p, size);
931 if (ret < 0)
932 return ret;
933
934 /* Reached EOF, return what we have. */
935 if (!ret)
936 break;
937
938 BUG_ON(ret > size);
939
940 r += ret;
941 p += ret;
942 offset += ret;
943 size -= ret;
944
945 } while (size);
946
947 return r;
948}
949
950static int file_size(struct dso *dso, struct machine *machine)
951{
952 int ret = 0;
953 struct stat st;
954 char sbuf[STRERR_BUFSIZE];
955
956 pthread_mutex_lock(&dso__data_open_lock);
957
958 /*
959 * dso->data.fd might be closed if other thread opened another
960 * file (dso) due to open file limit (RLIMIT_NOFILE).
961 */
962 try_to_open_dso(dso, machine);
963
964 if (dso->data.fd < 0) {
965 ret = -errno;
966 dso->data.status = DSO_DATA_STATUS_ERROR;
967 goto out;
968 }
969
970 if (fstat(dso->data.fd, &st) < 0) {
971 ret = -errno;
972 pr_err("dso cache fstat failed: %s\n",
973 str_error_r(errno, sbuf, sizeof(sbuf)));
974 dso->data.status = DSO_DATA_STATUS_ERROR;
975 goto out;
976 }
977 dso->data.file_size = st.st_size;
978
979out:
980 pthread_mutex_unlock(&dso__data_open_lock);
981 return ret;
982}
983
984int dso__data_file_size(struct dso *dso, struct machine *machine)
985{
986 if (dso->data.file_size)
987 return 0;
988
989 if (dso->data.status == DSO_DATA_STATUS_ERROR)
990 return -1;
991
992 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
993 return bpf_size(dso);
994
995 return file_size(dso, machine);
996}
997
998/**
999 * dso__data_size - Return dso data size
1000 * @dso: dso object
1001 * @machine: machine object
1002 *
1003 * Return: dso data size
1004 */
1005off_t dso__data_size(struct dso *dso, struct machine *machine)
1006{
1007 if (dso__data_file_size(dso, machine))
1008 return -1;
1009
1010 /* For now just estimate dso data size is close to file size */
1011 return dso->data.file_size;
1012}
1013
1014static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
1015 u64 offset, u8 *data, ssize_t size)
1016{
1017 if (dso__data_file_size(dso, machine))
1018 return -1;
1019
1020 /* Check the offset sanity. */
1021 if (offset > dso->data.file_size)
1022 return -1;
1023
1024 if (offset + size < offset)
1025 return -1;
1026
1027 return cached_read(dso, machine, offset, data, size);
1028}
1029
1030/**
1031 * dso__data_read_offset - Read data from dso file offset
1032 * @dso: dso object
1033 * @machine: machine object
1034 * @offset: file offset
1035 * @data: buffer to store data
1036 * @size: size of the @data buffer
1037 *
1038 * External interface to read data from a dso file offset. Opens
1039 * the dso data file and uses cached_read() to get the data.
1040 */
1041ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1042 u64 offset, u8 *data, ssize_t size)
1043{
1044 if (dso->data.status == DSO_DATA_STATUS_ERROR)
1045 return -1;
1046
1047 return data_read_offset(dso, machine, offset, data, size);
1048}
1049
1050/**
1051 * dso__data_read_addr - Read data from dso address
1052 * @dso: dso object
1053 * @machine: machine object
1054 * @addr: virtual memory address
1055 * @data: buffer to store data
1056 * @size: size of the @data buffer
1057 *
1058 * External interface to read data from dso address.
1059 */
1060ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1061 struct machine *machine, u64 addr,
1062 u8 *data, ssize_t size)
1063{
1064 u64 offset = map->map_ip(map, addr);
1065 return dso__data_read_offset(dso, machine, offset, data, size);
1066}
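/*
 * For example (illustrative), reading 16 bytes of instructions at a sampled
 * instruction pointer:
 *
 *	dso__data_read_addr(map->dso, map, machine, sample->ip, buf, 16);
 *
 * map->map_ip() translates the virtual address to a file offset before the
 * cached read is performed.
 */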
1067
1068struct map *dso__new_map(const char *name)
1069{
1070 struct map *map = NULL;
1071 struct dso *dso = dso__new(name);
1072
1073 if (dso)
1074 map = map__new2(0, dso);
1075
1076 return map;
1077}
1078
1079struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1080 const char *short_name, int dso_type)
1081{
1082 /*
1083 * The kernel dso could be created by build_id processing.
1084 */
1085 struct dso *dso = machine__findnew_dso(machine, name);
1086
1087 /*
1088 * We need to run this in all cases, since during the build_id
1089 * processing we had no idea this was the kernel dso.
1090 */
1091 if (dso != NULL) {
1092 dso__set_short_name(dso, short_name, false);
1093 dso->kernel = dso_type;
1094 }
1095
1096 return dso;
1097}
1098
1099void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1100{
1101 struct rb_root *root = dso->root;
1102
1103 if (name == NULL)
1104 return;
1105
1106 if (dso->long_name_allocated)
1107 free((char *)dso->long_name);
1108
1109 if (root) {
1110 rb_erase(&dso->rb_node, root);
1111 /*
1112 * __dsos__findnew_link_by_longname() isn't guaranteed to add it
1113 * back, so a clean removal is required here.
1114 */
1115 RB_CLEAR_NODE(&dso->rb_node);
1116 dso->root = NULL;
1117 }
1118
1119 dso->long_name = name;
1120 dso->long_name_len = strlen(name);
1121 dso->long_name_allocated = name_allocated;
1122
1123 if (root)
1124 __dsos__findnew_link_by_longname(root, dso, NULL);
1125}
1126
1127void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1128{
1129 if (name == NULL)
1130 return;
1131
1132 if (dso->short_name_allocated)
1133 free((char *)dso->short_name);
1134
1135 dso->short_name = name;
1136 dso->short_name_len = strlen(name);
1137 dso->short_name_allocated = name_allocated;
1138}
1139
1140int dso__name_len(const struct dso *dso)
1141{
1142 if (!dso)
1143 return strlen("[unknown]");
1144 if (verbose > 0)
1145 return dso->long_name_len;
1146
1147 return dso->short_name_len;
1148}
1149
1150bool dso__loaded(const struct dso *dso)
1151{
1152 return dso->loaded;
1153}
1154
1155bool dso__sorted_by_name(const struct dso *dso)
1156{
1157 return dso->sorted_by_name;
1158}
1159
1160void dso__set_sorted_by_name(struct dso *dso)
1161{
1162 dso->sorted_by_name = true;
1163}
1164
1165struct dso *dso__new(const char *name)
1166{
1167 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1168
1169 if (dso != NULL) {
1170 strcpy(dso->name, name);
1171 dso__set_long_name(dso, dso->name, false);
1172 dso__set_short_name(dso, dso->name, false);
1173 dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1174 dso->data.cache = RB_ROOT;
1175 dso->inlined_nodes = RB_ROOT_CACHED;
1176 dso->srclines = RB_ROOT_CACHED;
1177 dso->data.fd = -1;
1178 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1179 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1180 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1181 dso->is_64_bit = (sizeof(void *) == 8);
1182 dso->loaded = 0;
1183 dso->rel = 0;
1184 dso->sorted_by_name = 0;
1185 dso->has_build_id = 0;
1186 dso->has_srcline = 1;
1187 dso->a2l_fails = 1;
1188 dso->kernel = DSO_TYPE_USER;
1189 dso->needs_swap = DSO_SWAP__UNSET;
1190 dso->comp = COMP_ID__NONE;
1191 RB_CLEAR_NODE(&dso->rb_node);
1192 dso->root = NULL;
1193 INIT_LIST_HEAD(&dso->node);
1194 INIT_LIST_HEAD(&dso->data.open_entry);
1195 pthread_mutex_init(&dso->lock, NULL);
1196 refcount_set(&dso->refcnt, 1);
1197 }
1198
1199 return dso;
1200}
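/*
 * A minimal lifetime sketch (hypothetical caller):
 *
 *	struct dso *dso = dso__new("/usr/lib64/libfoo.so");
 *
 *	if (dso != NULL) {
 *		... add it to a dsos list or use it directly ...
 *		dso__put(dso);
 *	}
 *
 * dso__put() drops the reference taken by dso__new() and frees the object
 * once the refcount hits zero.
 */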
1201
1202void dso__delete(struct dso *dso)
1203{
1204 if (!RB_EMPTY_NODE(&dso->rb_node))
1205 pr_err("DSO %s is still in rbtree when being deleted!\n",
1206 dso->long_name);
1207
1208 /* free inlines first, as they reference symbols */
1209 inlines__tree_delete(&dso->inlined_nodes);
1210 srcline__tree_delete(&dso->srclines);
1211 symbols__delete(&dso->symbols);
1212
1213 if (dso->short_name_allocated) {
1214 zfree((char **)&dso->short_name);
1215 dso->short_name_allocated = false;
1216 }
1217
1218 if (dso->long_name_allocated) {
1219 zfree((char **)&dso->long_name);
1220 dso->long_name_allocated = false;
1221 }
1222
1223 dso__data_close(dso);
1224 auxtrace_cache__free(dso->auxtrace_cache);
1225 dso_cache__free(dso);
1226 dso__free_a2l(dso);
1227 zfree(&dso->symsrc_filename);
1228 nsinfo__zput(dso->nsinfo);
1229 pthread_mutex_destroy(&dso->lock);
1230 free(dso);
1231}
1232
1233struct dso *dso__get(struct dso *dso)
1234{
1235 if (dso)
1236 refcount_inc(&dso->refcnt);
1237 return dso;
1238}
1239
1240void dso__put(struct dso *dso)
1241{
1242 if (dso && refcount_dec_and_test(&dso->refcnt))
1243 dso__delete(dso);
1244}
1245
1246void dso__set_build_id(struct dso *dso, void *build_id)
1247{
1248 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1249 dso->has_build_id = 1;
1250}
1251
1252bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1253{
1254 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1255}
1256
1257void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1258{
1259 char path[PATH_MAX];
1260
1261 if (machine__is_default_guest(machine))
1262 return;
1263 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1264 if (sysfs__read_build_id(path, dso->build_id,
1265 sizeof(dso->build_id)) == 0)
1266 dso->has_build_id = true;
1267}
1268
1269int dso__kernel_module_get_build_id(struct dso *dso,
1270 const char *root_dir)
1271{
1272 char filename[PATH_MAX];
1273 /*
1274 * kernel module short names are of the form "[module]" and
1275 * we need just "module" here.
1276 */
1277 const char *name = dso->short_name + 1;
1278
1279 snprintf(filename, sizeof(filename),
1280 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1281 root_dir, (int)strlen(name) - 1, name);
1282
1283 if (sysfs__read_build_id(filename, dso->build_id,
1284 sizeof(dso->build_id)) == 0)
1285 dso->has_build_id = true;
1286
1287 return 0;
1288}
1289
1290size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1291{
1292 char sbuild_id[SBUILD_ID_SIZE];
1293
1294 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1295 return fprintf(fp, "%s", sbuild_id);
1296}
1297
1298size_t dso__fprintf(struct dso *dso, FILE *fp)
1299{
1300 struct rb_node *nd;
1301 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1302
1303 if (dso->short_name != dso->long_name)
1304 ret += fprintf(fp, "%s, ", dso->long_name);
1305 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1306 ret += dso__fprintf_buildid(dso, fp);
1307 ret += fprintf(fp, ")\n");
1308 for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1309 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1310 ret += symbol__fprintf(pos, fp);
1311 }
1312
1313 return ret;
1314}
1315
1316enum dso_type dso__type(struct dso *dso, struct machine *machine)
1317{
1318 int fd;
1319 enum dso_type type = DSO__TYPE_UNKNOWN;
1320
1321 fd = dso__data_get_fd(dso, machine);
1322 if (fd >= 0) {
1323 type = dso__type_fd(fd);
1324 dso__data_put_fd(dso);
1325 }
1326
1327 return type;
1328}
1329
1330int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1331{
1332 int idx, errnum = dso->load_errno;
1333 /*
1334	 * This must have the same ordering as enum dso_load_errno.
1335 */
1336 static const char *dso_load__error_str[] = {
1337 "Internal tools/perf/ library error",
1338 "Invalid ELF file",
1339 "Can not read build id",
1340 "Mismatching build id",
1341 "Decompression failure",
1342 };
1343
1344 BUG_ON(buflen == 0);
1345
1346 if (errnum >= 0) {
1347 const char *err = str_error_r(errnum, buf, buflen);
1348
1349 if (err != buf)
1350 scnprintf(buf, buflen, "%s", err);
1351
1352 return 0;
1353 }
1354
1355 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1356 return -1;
1357
1358 idx = errnum - __DSO_LOAD_ERRNO__START;
1359 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1360 return 0;
1361}
1#include <asm/bug.h>
2#include <sys/time.h>
3#include <sys/resource.h>
4#include "symbol.h"
5#include "dso.h"
6#include "machine.h"
7#include "auxtrace.h"
8#include "util.h"
9#include "debug.h"
10
11char dso__symtab_origin(const struct dso *dso)
12{
13 static const char origin[] = {
14 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
15 [DSO_BINARY_TYPE__VMLINUX] = 'v',
16 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
17 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
18 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
19 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
20 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
21 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
22 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
23 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
24 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
25 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
26 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
27 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
28 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
29 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
30 };
31
32 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
33 return '!';
34 return origin[dso->symtab_type];
35}
36
37int dso__read_binary_type_filename(const struct dso *dso,
38 enum dso_binary_type type,
39 char *root_dir, char *filename, size_t size)
40{
41 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
42 int ret = 0;
43 size_t len;
44
45 switch (type) {
46 case DSO_BINARY_TYPE__DEBUGLINK: {
47 char *debuglink;
48
49 len = __symbol__join_symfs(filename, size, dso->long_name);
50 debuglink = filename + len;
51 while (debuglink != filename && *debuglink != '/')
52 debuglink--;
53 if (*debuglink == '/')
54 debuglink++;
55
56 ret = -1;
57 if (!is_regular_file(filename))
58 break;
59
60 ret = filename__read_debuglink(filename, debuglink,
61 size - (debuglink - filename));
62 }
63 break;
64 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
65 /* skip the locally configured cache if a symfs is given */
66 if (symbol_conf.symfs[0] ||
67 (dso__build_id_filename(dso, filename, size) == NULL))
68 ret = -1;
69 break;
70
71 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
72 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
73 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
74 break;
75
76 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
77 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
78 snprintf(filename + len, size - len, "%s", dso->long_name);
79 break;
80
81 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
82 {
83 const char *last_slash;
84 size_t dir_size;
85
86 last_slash = dso->long_name + dso->long_name_len;
87 while (last_slash != dso->long_name && *last_slash != '/')
88 last_slash--;
89
90 len = __symbol__join_symfs(filename, size, "");
91 dir_size = last_slash - dso->long_name + 2;
92 if (dir_size > (size - len)) {
93 ret = -1;
94 break;
95 }
96 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
97 len += scnprintf(filename + len , size - len, ".debug%s",
98 last_slash);
99 break;
100 }
101
102 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
103 if (!dso->has_build_id) {
104 ret = -1;
105 break;
106 }
107
108 build_id__sprintf(dso->build_id,
109 sizeof(dso->build_id),
110 build_id_hex);
111 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
112 snprintf(filename + len, size - len, "%.2s/%s.debug",
113 build_id_hex, build_id_hex + 2);
114 break;
115
116 case DSO_BINARY_TYPE__VMLINUX:
117 case DSO_BINARY_TYPE__GUEST_VMLINUX:
118 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
119 __symbol__join_symfs(filename, size, dso->long_name);
120 break;
121
122 case DSO_BINARY_TYPE__GUEST_KMODULE:
123 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
124 path__join3(filename, size, symbol_conf.symfs,
125 root_dir, dso->long_name);
126 break;
127
128 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
129 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
130 __symbol__join_symfs(filename, size, dso->long_name);
131 break;
132
133 case DSO_BINARY_TYPE__KCORE:
134 case DSO_BINARY_TYPE__GUEST_KCORE:
135 snprintf(filename, size, "%s", dso->long_name);
136 break;
137
138 default:
139 case DSO_BINARY_TYPE__KALLSYMS:
140 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
141 case DSO_BINARY_TYPE__JAVA_JIT:
142 case DSO_BINARY_TYPE__NOT_FOUND:
143 ret = -1;
144 break;
145 }
146
147 return ret;
148}
149
150static const struct {
151 const char *fmt;
152 int (*decompress)(const char *input, int output);
153} compressions[] = {
154#ifdef HAVE_ZLIB_SUPPORT
155 { "gz", gzip_decompress_to_file },
156#endif
157#ifdef HAVE_LZMA_SUPPORT
158 { "xz", lzma_decompress_to_file },
159#endif
160 { NULL, NULL },
161};
162
163bool is_supported_compression(const char *ext)
164{
165 unsigned i;
166
167 for (i = 0; compressions[i].fmt; i++) {
168 if (!strcmp(ext, compressions[i].fmt))
169 return true;
170 }
171 return false;
172}
173
174bool is_kernel_module(const char *pathname, int cpumode)
175{
176 struct kmod_path m;
177 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
178
179 WARN_ONCE(mode != cpumode,
180 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
181 cpumode);
182
183 switch (mode) {
184 case PERF_RECORD_MISC_USER:
185 case PERF_RECORD_MISC_HYPERVISOR:
186 case PERF_RECORD_MISC_GUEST_USER:
187 return false;
188 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
189 default:
190 if (kmod_path__parse(&m, pathname)) {
191 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
192 pathname);
193 return true;
194 }
195 }
196
197 return m.kmod;
198}
199
200bool decompress_to_file(const char *ext, const char *filename, int output_fd)
201{
202 unsigned i;
203
204 for (i = 0; compressions[i].fmt; i++) {
205 if (!strcmp(ext, compressions[i].fmt))
206 return !compressions[i].decompress(filename,
207 output_fd);
208 }
209 return false;
210}
211
212bool dso__needs_decompress(struct dso *dso)
213{
214 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
215 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
216}
217
218/*
219 * Parses kernel module specified in @path and updates
220 * @m argument like:
221 *
222 * @comp - true if @path contains supported compression suffix,
223 * false otherwise
224 * @kmod - true if @path contains '.ko' suffix in right position,
225 * false otherwise
226 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
227 * of the kernel module without suffixes, otherwise strudup-ed
228 * base name of @path
229 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string
230 * the compression suffix
231 *
232 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
233 */
234int __kmod_path__parse(struct kmod_path *m, const char *path,
235 bool alloc_name, bool alloc_ext)
236{
237 const char *name = strrchr(path, '/');
238 const char *ext = strrchr(path, '.');
239 bool is_simple_name = false;
240
241 memset(m, 0x0, sizeof(*m));
242 name = name ? name + 1 : path;
243
244 /*
245 * '.' is also a valid character for module name. For example:
246 * [aaa.bbb] is a valid module name. '[' should have higher
247 * priority than '.ko' suffix.
248 *
249 * The kernel names are from machine__mmap_name. Such
250 * name should belong to kernel itself, not kernel module.
251 */
252 if (name[0] == '[') {
253 is_simple_name = true;
254 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
255 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
256 (strncmp(name, "[vdso]", 6) == 0) ||
257 (strncmp(name, "[vsyscall]", 10) == 0)) {
258 m->kmod = false;
259
260 } else
261 m->kmod = true;
262 }
263
264 /* No extension, just return name. */
265 if ((ext == NULL) || is_simple_name) {
266 if (alloc_name) {
267 m->name = strdup(name);
268 return m->name ? 0 : -ENOMEM;
269 }
270 return 0;
271 }
272
273 if (is_supported_compression(ext + 1)) {
274 m->comp = true;
275 ext -= 3;
276 }
277
278 /* Check .ko extension only if there's enough name left. */
279 if (ext > name)
280 m->kmod = !strncmp(ext, ".ko", 3);
281
282 if (alloc_name) {
283 if (m->kmod) {
284 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
285 return -ENOMEM;
286 } else {
287 if (asprintf(&m->name, "%s", name) == -1)
288 return -ENOMEM;
289 }
290
291 strxfrchar(m->name, '-', '_');
292 }
293
294 if (alloc_ext && m->comp) {
295 m->ext = strdup(ext + 4);
296 if (!m->ext) {
297 free((void *) m->name);
298 return -ENOMEM;
299 }
300 }
301
302 return 0;
303}
304
305/*
306 * Global list of open DSOs and the counter.
307 */
308static LIST_HEAD(dso__data_open);
309static long dso__data_open_cnt;
310static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
311
312static void dso__list_add(struct dso *dso)
313{
314 list_add_tail(&dso->data.open_entry, &dso__data_open);
315 dso__data_open_cnt++;
316}
317
318static void dso__list_del(struct dso *dso)
319{
320 list_del(&dso->data.open_entry);
321 WARN_ONCE(dso__data_open_cnt <= 0,
322 "DSO data fd counter out of bounds.");
323 dso__data_open_cnt--;
324}
325
326static void close_first_dso(void);
327
328static int do_open(char *name)
329{
330 int fd;
331 char sbuf[STRERR_BUFSIZE];
332
333 do {
334 fd = open(name, O_RDONLY);
335 if (fd >= 0)
336 return fd;
337
338 pr_debug("dso open failed: %s\n",
339 strerror_r(errno, sbuf, sizeof(sbuf)));
340 if (!dso__data_open_cnt || errno != EMFILE)
341 break;
342
343 close_first_dso();
344 } while (1);
345
346 return -1;
347}
348
349static int __open_dso(struct dso *dso, struct machine *machine)
350{
351 int fd;
352 char *root_dir = (char *)"";
353 char *name = malloc(PATH_MAX);
354
355 if (!name)
356 return -ENOMEM;
357
358 if (machine)
359 root_dir = machine->root_dir;
360
361 if (dso__read_binary_type_filename(dso, dso->binary_type,
362 root_dir, name, PATH_MAX)) {
363 free(name);
364 return -EINVAL;
365 }
366
367 fd = do_open(name);
368 free(name);
369 return fd;
370}
371
372static void check_data_close(void);
373
374/**
375 * dso_close - Open DSO data file
376 * @dso: dso object
377 *
378 * Open @dso's data file descriptor and updates
379 * list/count of open DSO objects.
380 */
381static int open_dso(struct dso *dso, struct machine *machine)
382{
383 int fd = __open_dso(dso, machine);
384
385 if (fd >= 0) {
386 dso__list_add(dso);
387 /*
388 * Check if we crossed the allowed number
389 * of opened DSOs and close one if needed.
390 */
391 check_data_close();
392 }
393
394 return fd;
395}
396
397static void close_data_fd(struct dso *dso)
398{
399 if (dso->data.fd >= 0) {
400 close(dso->data.fd);
401 dso->data.fd = -1;
402 dso->data.file_size = 0;
403 dso__list_del(dso);
404 }
405}
406
407/**
408 * dso_close - Close DSO data file
409 * @dso: dso object
410 *
411 * Close @dso's data file descriptor and updates
412 * list/count of open DSO objects.
413 */
414static void close_dso(struct dso *dso)
415{
416 close_data_fd(dso);
417}
418
419static void close_first_dso(void)
420{
421 struct dso *dso;
422
423 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
424 close_dso(dso);
425}
426
427static rlim_t get_fd_limit(void)
428{
429 struct rlimit l;
430 rlim_t limit = 0;
431
432 /* Allow half of the current open fd limit. */
433 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
434 if (l.rlim_cur == RLIM_INFINITY)
435 limit = l.rlim_cur;
436 else
437 limit = l.rlim_cur / 2;
438 } else {
439 pr_err("failed to get fd limit\n");
440 limit = 1;
441 }
442
443 return limit;
444}
445
446static bool may_cache_fd(void)
447{
448 static rlim_t limit;
449
450 if (!limit)
451 limit = get_fd_limit();
452
453 if (limit == RLIM_INFINITY)
454 return true;
455
456 return limit > (rlim_t) dso__data_open_cnt;
457}
458
459/*
460 * Check and close LRU dso if we crossed allowed limit
461 * for opened dso file descriptors. The limit is half
462 * of the RLIMIT_NOFILE files opened.
463*/
464static void check_data_close(void)
465{
466 bool cache_fd = may_cache_fd();
467
468 if (!cache_fd)
469 close_first_dso();
470}
471
472/**
473 * dso__data_close - Close DSO data file
474 * @dso: dso object
475 *
476 * External interface to close @dso's data file descriptor.
477 */
478void dso__data_close(struct dso *dso)
479{
480 pthread_mutex_lock(&dso__data_open_lock);
481 close_dso(dso);
482 pthread_mutex_unlock(&dso__data_open_lock);
483}
484
485static void try_to_open_dso(struct dso *dso, struct machine *machine)
486{
487 enum dso_binary_type binary_type_data[] = {
488 DSO_BINARY_TYPE__BUILD_ID_CACHE,
489 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
490 DSO_BINARY_TYPE__NOT_FOUND,
491 };
492 int i = 0;
493
494 if (dso->data.fd >= 0)
495 return;
496
497 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
498 dso->data.fd = open_dso(dso, machine);
499 goto out;
500 }
501
502 do {
503 dso->binary_type = binary_type_data[i++];
504
505 dso->data.fd = open_dso(dso, machine);
506 if (dso->data.fd >= 0)
507 goto out;
508
509 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
510out:
511 if (dso->data.fd >= 0)
512 dso->data.status = DSO_DATA_STATUS_OK;
513 else
514 dso->data.status = DSO_DATA_STATUS_ERROR;
515}
516
517/**
518 * dso__data_get_fd - Get dso's data file descriptor
519 * @dso: dso object
520 * @machine: machine object
521 *
522 * External interface to find dso's file, open it and
523 * returns file descriptor. It should be paired with
524 * dso__data_put_fd() if it returns non-negative value.
525 */
526int dso__data_get_fd(struct dso *dso, struct machine *machine)
527{
528 if (dso->data.status == DSO_DATA_STATUS_ERROR)
529 return -1;
530
531 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
532 return -1;
533
534 try_to_open_dso(dso, machine);
535
536 if (dso->data.fd < 0)
537 pthread_mutex_unlock(&dso__data_open_lock);
538
539 return dso->data.fd;
540}
541
542void dso__data_put_fd(struct dso *dso __maybe_unused)
543{
544 pthread_mutex_unlock(&dso__data_open_lock);
545}
546
547bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
548{
549 u32 flag = 1 << by;
550
551 if (dso->data.status_seen & flag)
552 return true;
553
554 dso->data.status_seen |= flag;
555
556 return false;
557}
558
559static void
560dso_cache__free(struct dso *dso)
561{
562 struct rb_root *root = &dso->data.cache;
563 struct rb_node *next = rb_first(root);
564
565 pthread_mutex_lock(&dso->lock);
566 while (next) {
567 struct dso_cache *cache;
568
569 cache = rb_entry(next, struct dso_cache, rb_node);
570 next = rb_next(&cache->rb_node);
571 rb_erase(&cache->rb_node, root);
572 free(cache);
573 }
574 pthread_mutex_unlock(&dso->lock);
575}
576
577static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
578{
579 const struct rb_root *root = &dso->data.cache;
580 struct rb_node * const *p = &root->rb_node;
581 const struct rb_node *parent = NULL;
582 struct dso_cache *cache;
583
584 while (*p != NULL) {
585 u64 end;
586
587 parent = *p;
588 cache = rb_entry(parent, struct dso_cache, rb_node);
589 end = cache->offset + DSO__DATA_CACHE_SIZE;
590
591 if (offset < cache->offset)
592 p = &(*p)->rb_left;
593 else if (offset >= end)
594 p = &(*p)->rb_right;
595 else
596 return cache;
597 }
598
599 return NULL;
600}
601
602static struct dso_cache *
603dso_cache__insert(struct dso *dso, struct dso_cache *new)
604{
605 struct rb_root *root = &dso->data.cache;
606 struct rb_node **p = &root->rb_node;
607 struct rb_node *parent = NULL;
608 struct dso_cache *cache;
609 u64 offset = new->offset;
610
611 pthread_mutex_lock(&dso->lock);
612 while (*p != NULL) {
613 u64 end;
614
615 parent = *p;
616 cache = rb_entry(parent, struct dso_cache, rb_node);
617 end = cache->offset + DSO__DATA_CACHE_SIZE;
618
619 if (offset < cache->offset)
620 p = &(*p)->rb_left;
621 else if (offset >= end)
622 p = &(*p)->rb_right;
623 else
624 goto out;
625 }
626
627 rb_link_node(&new->rb_node, parent, p);
628 rb_insert_color(&new->rb_node, root);
629
630 cache = NULL;
631out:
632 pthread_mutex_unlock(&dso->lock);
633 return cache;
634}
635
636static ssize_t
637dso_cache__memcpy(struct dso_cache *cache, u64 offset,
638 u8 *data, u64 size)
639{
640 u64 cache_offset = offset - cache->offset;
641 u64 cache_size = min(cache->size - cache_offset, size);
642
643 memcpy(data, cache->data + cache_offset, cache_size);
644 return cache_size;
645}
646
647static ssize_t
648dso_cache__read(struct dso *dso, struct machine *machine,
649 u64 offset, u8 *data, ssize_t size)
650{
651 struct dso_cache *cache;
652 struct dso_cache *old;
653 ssize_t ret;
654
655 do {
656 u64 cache_offset;
657
658 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
659 if (!cache)
660 return -ENOMEM;
661
662 pthread_mutex_lock(&dso__data_open_lock);
663
664 /*
665 * dso->data.fd might be closed if other thread opened another
666 * file (dso) due to open file limit (RLIMIT_NOFILE).
667 */
668 try_to_open_dso(dso, machine);
669
670 if (dso->data.fd < 0) {
671 ret = -errno;
672 dso->data.status = DSO_DATA_STATUS_ERROR;
673 break;
674 }
675
676 cache_offset = offset & DSO__DATA_CACHE_MASK;
677
678 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
679 if (ret <= 0)
680 break;
681
682 cache->offset = cache_offset;
683 cache->size = ret;
684 } while (0);
685
686 pthread_mutex_unlock(&dso__data_open_lock);
687
688 if (ret > 0) {
689 old = dso_cache__insert(dso, cache);
690 if (old) {
691 /* we lose the race */
692 free(cache);
693 cache = old;
694 }
695
696 ret = dso_cache__memcpy(cache, offset, data, size);
697 }
698
699 if (ret <= 0)
700 free(cache);
701
702 return ret;
703}
704
705static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
706 u64 offset, u8 *data, ssize_t size)
707{
708 struct dso_cache *cache;
709
710 cache = dso_cache__find(dso, offset);
711 if (cache)
712 return dso_cache__memcpy(cache, offset, data, size);
713 else
714 return dso_cache__read(dso, machine, offset, data, size);
715}
716
717/*
718 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
719 * in the rb_tree. Any read to already cached data is served
720 * by cached data.
721 */
722static ssize_t cached_read(struct dso *dso, struct machine *machine,
723 u64 offset, u8 *data, ssize_t size)
724{
725 ssize_t r = 0;
726 u8 *p = data;
727
728 do {
729 ssize_t ret;
730
731 ret = dso_cache_read(dso, machine, offset, p, size);
732 if (ret < 0)
733 return ret;
734
735 /* Reached EOF, return what we have. */
736 if (!ret)
737 break;
738
739 BUG_ON(ret > size);
740
741 r += ret;
742 p += ret;
743 offset += ret;
744 size -= ret;
745
746 } while (size);
747
748 return r;
749}
750
751static int data_file_size(struct dso *dso, struct machine *machine)
752{
753 int ret = 0;
754 struct stat st;
755 char sbuf[STRERR_BUFSIZE];
756
757 if (dso->data.file_size)
758 return 0;
759
760 if (dso->data.status == DSO_DATA_STATUS_ERROR)
761 return -1;
762
763 pthread_mutex_lock(&dso__data_open_lock);
764
765 /*
766 * dso->data.fd might be closed if other thread opened another
767 * file (dso) due to open file limit (RLIMIT_NOFILE).
768 */
769 try_to_open_dso(dso, machine);
770
771 if (dso->data.fd < 0) {
772 ret = -errno;
773 dso->data.status = DSO_DATA_STATUS_ERROR;
774 goto out;
775 }
776
777 if (fstat(dso->data.fd, &st) < 0) {
778 ret = -errno;
779 pr_err("dso cache fstat failed: %s\n",
780 strerror_r(errno, sbuf, sizeof(sbuf)));
781 dso->data.status = DSO_DATA_STATUS_ERROR;
782 goto out;
783 }
784 dso->data.file_size = st.st_size;
785
786out:
787 pthread_mutex_unlock(&dso__data_open_lock);
788 return ret;
789}
790
791/**
792 * dso__data_size - Return dso data size
793 * @dso: dso object
794 * @machine: machine object
795 *
796 * Return: dso data size
797 */
798off_t dso__data_size(struct dso *dso, struct machine *machine)
799{
800 if (data_file_size(dso, machine))
801 return -1;
802
803 /* For now just estimate dso data size is close to file size */
804 return dso->data.file_size;
805}
806
807static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
808 u64 offset, u8 *data, ssize_t size)
809{
810 if (data_file_size(dso, machine))
811 return -1;
812
813 /* Check the offset sanity. */
814 if (offset > dso->data.file_size)
815 return -1;
816
817 if (offset + size < offset)
818 return -1;
819
820 return cached_read(dso, machine, offset, data, size);
821}
822
823/**
824 * dso__data_read_offset - Read data from dso file offset
825 * @dso: dso object
826 * @machine: machine object
827 * @offset: file offset
828 * @data: buffer to store data
829 * @size: size of the @data buffer
830 *
831 * External interface to read data from dso file offset. Open
832 * dso data file and use cached_read to get the data.
833 */
834ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
835 u64 offset, u8 *data, ssize_t size)
836{
837 if (dso->data.status == DSO_DATA_STATUS_ERROR)
838 return -1;
839
840 return data_read_offset(dso, machine, offset, data, size);
841}
842
843/**
844 * dso__data_read_addr - Read data from dso address
845 * @dso: dso object
846 * @machine: machine object
847 * @add: virtual memory address
848 * @data: buffer to store data
849 * @size: size of the @data buffer
850 *
851 * External interface to read data from dso address.
852 */
853ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
854 struct machine *machine, u64 addr,
855 u8 *data, ssize_t size)
856{
857 u64 offset = map->map_ip(map, addr);
858 return dso__data_read_offset(dso, machine, offset, data, size);
859}
860
861struct map *dso__new_map(const char *name)
862{
863 struct map *map = NULL;
864 struct dso *dso = dso__new(name);
865
866 if (dso)
867 map = map__new2(0, dso, MAP__FUNCTION);
868
869 return map;
870}
871
872struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
873 const char *short_name, int dso_type)
874{
875 /*
876 * The kernel dso could be created by build_id processing.
877 */
878 struct dso *dso = machine__findnew_dso(machine, name);
879
880 /*
881 * We need to run this in all cases, since during the build_id
882 * processing we had no idea this was the kernel dso.
883 */
884 if (dso != NULL) {
885 dso__set_short_name(dso, short_name, false);
886 dso->kernel = dso_type;
887 }
888
889 return dso;
890}
891
892/*
893 * Find a matching entry and/or link current entry to RB tree.
894 * At least one of the dso and name parameters must be non-NULL,
895 * otherwise the function cannot do its lookup or insertion.
896 */
897static struct dso *__dso__findlink_by_longname(struct rb_root *root,
898 struct dso *dso, const char *name)
899{
900 struct rb_node **p = &root->rb_node;
901 struct rb_node *parent = NULL;
902
903 if (!name)
904 name = dso->long_name;
905 /*
906 * Find node with the matching name
907 */
908 while (*p) {
909 struct dso *this = rb_entry(*p, struct dso, rb_node);
910 int rc = strcmp(name, this->long_name);
911
912 parent = *p;
913 if (rc == 0) {
914			/*
915			 * The long names match. When we are only doing a
916			 * lookup (no dso passed) or re-finding this very
917			 * dso, return the existing entry.
918			 */
919			if (!dso || (dso == this))
920				return this; /* Found matching dso */
921 /*
922			 * Core kernel DSOs may share a long name; in that
923			 * case the short names should differ, so compare
924			 * the short names to differentiate the DSOs.
925 */
926 rc = strcmp(dso->short_name, this->short_name);
927 if (rc == 0) {
928 pr_err("Duplicated dso name: %s\n", name);
929 return NULL;
930 }
931 }
932 if (rc < 0)
933 p = &parent->rb_left;
934 else
935 p = &parent->rb_right;
936 }
937 if (dso) {
938 /* Add new node and rebalance tree */
939 rb_link_node(&dso->rb_node, parent, p);
940 rb_insert_color(&dso->rb_node, root);
941 dso->root = root;
942 }
943 return NULL;
944}
945
946static inline struct dso *__dso__find_by_longname(struct rb_root *root,
947 const char *name)
948{
949 return __dso__findlink_by_longname(root, NULL, name);
950}
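
/*
 * Illustrative sketch, not part of the original file: with dsos->lock held,
 * a pure lookup by long name (the example path is made up) is simply:
 *
 *	struct dso *dso = __dso__find_by_longname(&dsos->root,
 *						  "/usr/lib64/libc.so.6");
 *
 * Passing a non-NULL dso to __dso__findlink_by_longname() additionally
 * links it into the rb tree when no matching entry is found.
 */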
951
952void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
953{
954 struct rb_root *root = dso->root;
955
956 if (name == NULL)
957 return;
958
959 if (dso->long_name_allocated)
960 free((char *)dso->long_name);
961
962 if (root) {
963 rb_erase(&dso->rb_node, root);
964 /*
965 * __dso__findlink_by_longname() isn't guaranteed to add it
966 * back, so a clean removal is required here.
967 */
968 RB_CLEAR_NODE(&dso->rb_node);
969 dso->root = NULL;
970 }
971
972 dso->long_name = name;
973 dso->long_name_len = strlen(name);
974 dso->long_name_allocated = name_allocated;
975
976 if (root)
977 __dso__findlink_by_longname(root, dso, NULL);
978}
979
980void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
981{
982 if (name == NULL)
983 return;
984
985 if (dso->short_name_allocated)
986 free((char *)dso->short_name);
987
988 dso->short_name = name;
989 dso->short_name_len = strlen(name);
990 dso->short_name_allocated = name_allocated;
991}
992
993static void dso__set_basename(struct dso *dso)
994{
995 /*
996	 * basename() may modify the path buffer, so we must
997	 * pass a copy.
998 */
999 char *base, *lname = strdup(dso->long_name);
1000
1001 if (!lname)
1002 return;
1003
1004 /*
1005	 * basename() may return a pointer to internal
1006	 * storage that is reused in subsequent calls,
1007	 * so copy the result.
1008 */
1009 base = strdup(basename(lname));
1010
1011 free(lname);
1012
1013 if (!base)
1014 return;
1015
1016 dso__set_short_name(dso, base, true);
1017}
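
/*
 * Illustrative example, not part of the original file: for a hypothetical
 * dso with long name "/usr/lib64/libfoo.so.1",
 *
 *	dso__set_basename(dso);
 *
 * sets the short name to "libfoo.so.1". The duplication above is needed
 * because basename() may clobber its input and reuse internal storage for
 * its output.
 */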
1018
1019int dso__name_len(const struct dso *dso)
1020{
1021 if (!dso)
1022 return strlen("[unknown]");
1023 if (verbose)
1024 return dso->long_name_len;
1025
1026 return dso->short_name_len;
1027}
1028
1029bool dso__loaded(const struct dso *dso, enum map_type type)
1030{
1031 return dso->loaded & (1 << type);
1032}
1033
1034bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1035{
1036 return dso->sorted_by_name & (1 << type);
1037}
1038
1039void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1040{
1041 dso->sorted_by_name |= (1 << type);
1042}
1043
1044struct dso *dso__new(const char *name)
1045{
1046 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1047
1048 if (dso != NULL) {
1049 int i;
1050 strcpy(dso->name, name);
1051 dso__set_long_name(dso, dso->name, false);
1052 dso__set_short_name(dso, dso->name, false);
1053 for (i = 0; i < MAP__NR_TYPES; ++i)
1054 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1055 dso->data.cache = RB_ROOT;
1056 dso->data.fd = -1;
1057 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1058 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1059 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1060 dso->is_64_bit = (sizeof(void *) == 8);
1061 dso->loaded = 0;
1062 dso->rel = 0;
1063 dso->sorted_by_name = 0;
1064 dso->has_build_id = 0;
1065 dso->has_srcline = 1;
1066 dso->a2l_fails = 1;
1067 dso->kernel = DSO_TYPE_USER;
1068 dso->needs_swap = DSO_SWAP__UNSET;
1069 RB_CLEAR_NODE(&dso->rb_node);
1070 dso->root = NULL;
1071 INIT_LIST_HEAD(&dso->node);
1072 INIT_LIST_HEAD(&dso->data.open_entry);
1073 pthread_mutex_init(&dso->lock, NULL);
1074 atomic_set(&dso->refcnt, 1);
1075 }
1076
1077 return dso;
1078}
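
/*
 * Illustrative sketch, not part of the original file: dso__new() returns an
 * object with a reference count of 1 owned by the caller, so a short-lived,
 * standalone dso (the path is made up) is handled like this:
 *
 *	struct dso *dso = dso__new("/tmp/example.so");
 *
 *	if (dso) {
 *		... use dso ...
 *		dso__put(dso);
 *	}
 *
 * The final dso__put() drops the last reference and ends up in
 * dso__delete().
 */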
1079
1080void dso__delete(struct dso *dso)
1081{
1082 int i;
1083
1084 if (!RB_EMPTY_NODE(&dso->rb_node))
1085 pr_err("DSO %s is still in rbtree when being deleted!\n",
1086 dso->long_name);
1087 for (i = 0; i < MAP__NR_TYPES; ++i)
1088 symbols__delete(&dso->symbols[i]);
1089
1090 if (dso->short_name_allocated) {
1091 zfree((char **)&dso->short_name);
1092 dso->short_name_allocated = false;
1093 }
1094
1095 if (dso->long_name_allocated) {
1096 zfree((char **)&dso->long_name);
1097 dso->long_name_allocated = false;
1098 }
1099
1100 dso__data_close(dso);
1101 auxtrace_cache__free(dso->auxtrace_cache);
1102 dso_cache__free(dso);
1103 dso__free_a2l(dso);
1104 zfree(&dso->symsrc_filename);
1105 pthread_mutex_destroy(&dso->lock);
1106 free(dso);
1107}
1108
1109struct dso *dso__get(struct dso *dso)
1110{
1111 if (dso)
1112 atomic_inc(&dso->refcnt);
1113 return dso;
1114}
1115
1116void dso__put(struct dso *dso)
1117{
1118 if (dso && atomic_dec_and_test(&dso->refcnt))
1119 dso__delete(dso);
1120}
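
/*
 * Illustrative sketch, not part of the original file: any structure that
 * stores a dso pointer beyond the current scope (a hypothetical 'holder'
 * here) is expected to pair dso__get() with a later dso__put():
 *
 *	holder->dso = dso__get(dso);
 *	...
 *	dso__put(holder->dso);
 *
 * so that dso__delete() only runs once the last user is gone.
 */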
1121
1122void dso__set_build_id(struct dso *dso, void *build_id)
1123{
1124 memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1125 dso->has_build_id = 1;
1126}
1127
1128bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1129{
1130 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1131}
1132
1133void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1134{
1135 char path[PATH_MAX];
1136
1137 if (machine__is_default_guest(machine))
1138 return;
1139 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1140 if (sysfs__read_build_id(path, dso->build_id,
1141 sizeof(dso->build_id)) == 0)
1142 dso->has_build_id = true;
1143}
1144
1145int dso__kernel_module_get_build_id(struct dso *dso,
1146 const char *root_dir)
1147{
1148 char filename[PATH_MAX];
1149 /*
1150 * kernel module short names are of the form "[module]" and
1151 * we need just "module" here.
1152 */
1153 const char *name = dso->short_name + 1;
1154
1155 snprintf(filename, sizeof(filename),
1156 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1157 root_dir, (int)strlen(name) - 1, name);
1158
1159 if (sysfs__read_build_id(filename, dso->build_id,
1160 sizeof(dso->build_id)) == 0)
1161 dso->has_build_id = true;
1162
1163 return 0;
1164}
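
/*
 * Illustrative example, not part of the original file: for a module dso
 * with the hypothetical short name "[e1000e]" and an empty root_dir, the
 * path built above is
 *
 *	/sys/module/e1000e/notes/.note.gnu.build-id
 *
 * i.e. 'short_name + 1' skips the leading '[' and printing only
 * strlen(name) - 1 characters drops the trailing ']'.
 */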
1165
1166bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1167{
1168 bool have_build_id = false;
1169 struct dso *pos;
1170
1171 list_for_each_entry(pos, head, node) {
1172 if (with_hits && !pos->hit)
1173 continue;
1174 if (pos->has_build_id) {
1175 have_build_id = true;
1176 continue;
1177 }
1178 if (filename__read_build_id(pos->long_name, pos->build_id,
1179 sizeof(pos->build_id)) > 0) {
1180 have_build_id = true;
1181 pos->has_build_id = true;
1182 }
1183 }
1184
1185 return have_build_id;
1186}
1187
1188void __dsos__add(struct dsos *dsos, struct dso *dso)
1189{
1190 list_add_tail(&dso->node, &dsos->head);
1191 __dso__findlink_by_longname(&dsos->root, dso, NULL);
1192	 * The dso is now in the linked list, so grab a reference for the
1193	 * list itself. It can be garbage collected later, when memory is
1194	 * needed, by looking at LRU dso instances in the list with
1195	 * atomic_read(&dso->refcnt) == 1, i.e. with no references anywhere
1196	 * besides the one held for the list. Under a lock for the list,
1197	 * remove such an entry from the list and then dso__put() it; that
1198	 * will probably be the last reference, so dso__delete() gets
1199	 * called, end of life.
1200	 *
1201	 * That, or at the end of the 'struct machine' lifetime, when all
1202	 * 'struct dso' instances will be removed from the list, in
1203	 * dsos__exit(), if they have no other reference from some other
1204	 * data structure.
1205	 *
1206	 * E.g.: after processing a 'perf.data' file and storing references
1207	 * to objects instantiated while processing events, we will have
1208	 * references to the 'thread', 'map' and 'dso' structs, all from
1209	 * 'struct hist_entry' instances. We may not need anything that is
1210	 * not referenced there, so we might as well call machines__exit()/
1211	 * machines__delete() and garbage collect it.
1212 dso__get(dso);
1213}
1214
1215void dsos__add(struct dsos *dsos, struct dso *dso)
1216{
1217 pthread_rwlock_wrlock(&dsos->lock);
1218 __dsos__add(dsos, dso);
1219 pthread_rwlock_unlock(&dsos->lock);
1220}
1221
1222struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1223{
1224 struct dso *pos;
1225
1226 if (cmp_short) {
1227 list_for_each_entry(pos, &dsos->head, node)
1228 if (strcmp(pos->short_name, name) == 0)
1229 return pos;
1230 return NULL;
1231 }
1232 return __dso__find_by_longname(&dsos->root, name);
1233}
1234
1235struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1236{
1237 struct dso *dso;
1238 pthread_rwlock_rdlock(&dsos->lock);
1239 dso = __dsos__find(dsos, name, cmp_short);
1240 pthread_rwlock_unlock(&dsos->lock);
1241 return dso;
1242}
1243
1244struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1245{
1246 struct dso *dso = dso__new(name);
1247
1248 if (dso != NULL) {
1249 __dsos__add(dsos, dso);
1250 dso__set_basename(dso);
1251		/* Put the creation reference: __dsos__add() already grabbed one for the list */
1252 dso__put(dso);
1253 }
1254 return dso;
1255}
1256
1257struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1258{
1259 struct dso *dso = __dsos__find(dsos, name, false);
1260
1261 return dso ? dso : __dsos__addnew(dsos, name);
1262}
1263
1264struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1265{
1266 struct dso *dso;
1267 pthread_rwlock_wrlock(&dsos->lock);
1268 dso = dso__get(__dsos__findnew(dsos, name));
1269 pthread_rwlock_unlock(&dsos->lock);
1270 return dso;
1271}
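
/*
 * Illustrative sketch, not part of the original file: dsos__findnew() takes
 * the dsos lock and returns its result with an extra reference grabbed for
 * the caller, so the usual pattern is:
 *
 *	struct dso *dso = dsos__findnew(&machine->dsos, name);
 *
 *	if (dso) {
 *		... use dso ...
 *		dso__put(dso);
 *	}
 */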
1272
1273size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1274 bool (skip)(struct dso *dso, int parm), int parm)
1275{
1276 struct dso *pos;
1277 size_t ret = 0;
1278
1279 list_for_each_entry(pos, head, node) {
1280 if (skip && skip(pos, parm))
1281 continue;
1282 ret += dso__fprintf_buildid(pos, fp);
1283 ret += fprintf(fp, " %s\n", pos->long_name);
1284 }
1285 return ret;
1286}
1287
1288size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1289{
1290 struct dso *pos;
1291 size_t ret = 0;
1292
1293 list_for_each_entry(pos, head, node) {
1294 int i;
1295 for (i = 0; i < MAP__NR_TYPES; ++i)
1296 ret += dso__fprintf(pos, i, fp);
1297 }
1298
1299 return ret;
1300}
1301
1302size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1303{
1304 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1305
1306 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1307 return fprintf(fp, "%s", sbuild_id);
1308}
1309
1310size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1311{
1312 struct rb_node *nd;
1313 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1314
1315 if (dso->short_name != dso->long_name)
1316 ret += fprintf(fp, "%s, ", dso->long_name);
1317 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1318 dso__loaded(dso, type) ? "" : "NOT ");
1319 ret += dso__fprintf_buildid(dso, fp);
1320 ret += fprintf(fp, ")\n");
1321 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1322 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1323 ret += symbol__fprintf(pos, fp);
1324 }
1325
1326 return ret;
1327}
1328
1329enum dso_type dso__type(struct dso *dso, struct machine *machine)
1330{
1331 int fd;
1332 enum dso_type type = DSO__TYPE_UNKNOWN;
1333
1334 fd = dso__data_get_fd(dso, machine);
1335 if (fd >= 0) {
1336 type = dso__type_fd(fd);
1337 dso__data_put_fd(dso);
1338 }
1339
1340 return type;
1341}
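
/*
 * Illustrative sketch, not part of the original file: as dso__type() does
 * above, every dso__data_get_fd() must be paired with dso__data_put_fd(),
 * since the cached fd may be closed and reopened behind the
 * dso__data_open_lock:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... read from fd ...
 *		dso__data_put_fd(dso);
 *	}
 */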
1342
1343int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1344{
1345 int idx, errnum = dso->load_errno;
1346 /*
1347	 * This must have the same ordering as enum dso_load_errno.
1348 */
1349 static const char *dso_load__error_str[] = {
1350 "Internal tools/perf/ library error",
1351 "Invalid ELF file",
1352 "Can not read build id",
1353 "Mismatching build id",
1354 "Decompression failure",
1355 };
1356
1357 BUG_ON(buflen == 0);
1358
1359 if (errnum >= 0) {
1360 const char *err = strerror_r(errnum, buf, buflen);
1361
1362 if (err != buf)
1363 scnprintf(buf, buflen, "%s", err);
1364
1365 return 0;
1366 }
1367
1368 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1369 return -1;
1370
1371 idx = errnum - __DSO_LOAD_ERRNO__START;
1372 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1373 return 0;
1374}
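
/*
 * Illustrative sketch, not part of the original file: after a failed
 * dso__load(), a caller would typically turn dso->load_errno into a
 * message like this:
 *
 *	char buf[BUFSIZ];
 *
 *	if (dso__strerror_load(dso, buf, sizeof(buf)) == 0)
 *		pr_err("failed to load %s: %s\n", dso->long_name, buf);
 */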