v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <asm/bug.h>
   3#include <linux/kernel.h>
   4#include <linux/string.h>
   5#include <linux/zalloc.h>
   6#include <sys/time.h>
   7#include <sys/resource.h>
   8#include <sys/types.h>
   9#include <sys/stat.h>
  10#include <unistd.h>
  11#include <errno.h>
  12#include <fcntl.h>
  13#include <stdlib.h>
  14#ifdef HAVE_LIBBPF_SUPPORT
  15#include <bpf/libbpf.h>
  16#include "bpf-event.h"
  17#include "bpf-utils.h"
  18#endif
  19#include "compress.h"
  20#include "env.h"
  21#include "namespaces.h"
  22#include "path.h"
  23#include "map.h"
  24#include "symbol.h"
  25#include "srcline.h"
  26#include "dso.h"
  27#include "dsos.h"
  28#include "machine.h"
  29#include "auxtrace.h"
  30#include "util.h" /* O_CLOEXEC for older systems */
  31#include "debug.h"
  32#include "string2.h"
  33#include "vdso.h"
  34
  35static const char * const debuglink_paths[] = {
  36	"%.0s%s",
  37	"%s/%s",
  38	"%s/.debug/%s",
  39	"/usr/lib/debug%s/%s"
  40};
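/*
 * Worked example (illustrative): with dso_dir "/usr/bin" and a debuglink
 * entry "ls.debug", the formats above expand, in order, to:
 *
 *	"ls.debug"
 *	"/usr/bin/ls.debug"
 *	"/usr/bin/.debug/ls.debug"
 *	"/usr/lib/debug/usr/bin/ls.debug"
 *
 * which are the candidate locations probed for DSO_BINARY_TYPE__DEBUGLINK
 * in dso__read_binary_type_filename() below.
 */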
  41
  42char dso__symtab_origin(const struct dso *dso)
  43{
  44	static const char origin[] = {
  45		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
  46		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
  47		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
  48		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
  49		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
  50		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
  51		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
  52		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
  53		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
  54		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
  55		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
  56		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
  57		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
  58		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
  59		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
  60		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
  61		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
  62		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
  63	};
  64
  65	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
  66		return '!';
  67	return origin[dso->symtab_type];
  68}
  69
  70int dso__read_binary_type_filename(const struct dso *dso,
  71				   enum dso_binary_type type,
  72				   char *root_dir, char *filename, size_t size)
  73{
  74	char build_id_hex[SBUILD_ID_SIZE];
  75	int ret = 0;
  76	size_t len;
  77
  78	switch (type) {
  79	case DSO_BINARY_TYPE__DEBUGLINK:
  80	{
  81		const char *last_slash;
  82		char dso_dir[PATH_MAX];
  83		char symfile[PATH_MAX];
  84		unsigned int i;
  85
  86		len = __symbol__join_symfs(filename, size, dso->long_name);
  87		last_slash = filename + len;
  88		while (last_slash != filename && *last_slash != '/')
  89			last_slash--;
  90
  91		strncpy(dso_dir, filename, last_slash - filename);
  92		dso_dir[last_slash-filename] = '\0';
  93
  94		if (!is_regular_file(filename)) {
  95			ret = -1;
  96			break;
  97		}
  98
  99		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
 100		if (ret)
 101			break;
 102
 103		/* Check predefined locations where debug file might reside */
 104		ret = -1;
 105		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
 106			snprintf(filename, size,
 107					debuglink_paths[i], dso_dir, symfile);
 108			if (is_regular_file(filename)) {
 109				ret = 0;
 110				break;
 111			}
 112		}
 113
 114		break;
 115	}
 116	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
 117		if (dso__build_id_filename(dso, filename, size, false) == NULL)
 118			ret = -1;
 119		break;
 120
 121	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
 122		if (dso__build_id_filename(dso, filename, size, true) == NULL)
 123			ret = -1;
 124		break;
 125
 126	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
 127		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 128		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
 129		break;
 130
 131	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
 132		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 133		snprintf(filename + len, size - len, "%s", dso->long_name);
 134		break;
 135
 136	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
 137		/*
 138		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
 139		 * /usr/lib/debug/lib when it is expected to be in
 140		 * /usr/lib/debug/usr/lib
 141		 */
 142		if (strlen(dso->long_name) < 9 ||
 143		    strncmp(dso->long_name, "/usr/lib/", 9)) {
 144			ret = -1;
 145			break;
 146		}
 147		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 148		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
 149		break;
 150
 151	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
 152	{
 153		const char *last_slash;
 154		size_t dir_size;
 155
 156		last_slash = dso->long_name + dso->long_name_len;
 157		while (last_slash != dso->long_name && *last_slash != '/')
 158			last_slash--;
 159
 160		len = __symbol__join_symfs(filename, size, "");
 161		dir_size = last_slash - dso->long_name + 2;
 162		if (dir_size > (size - len)) {
 163			ret = -1;
 164			break;
 165		}
 166		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
 167		len += scnprintf(filename + len , size - len, ".debug%s",
 168								last_slash);
 169		break;
 170	}
 171
 172	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
 173		if (!dso->has_build_id) {
 174			ret = -1;
 175			break;
 176		}
 177
 178		build_id__sprintf(&dso->bid, build_id_hex);
 179		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
 180		snprintf(filename + len, size - len, "%.2s/%s.debug",
 181			 build_id_hex, build_id_hex + 2);
 182		break;
 183
 184	case DSO_BINARY_TYPE__VMLINUX:
 185	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 186	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
 187		__symbol__join_symfs(filename, size, dso->long_name);
 188		break;
 189
 190	case DSO_BINARY_TYPE__GUEST_KMODULE:
 191	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
 192		path__join3(filename, size, symbol_conf.symfs,
 193			    root_dir, dso->long_name);
 194		break;
 195
 196	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
 197	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 198		__symbol__join_symfs(filename, size, dso->long_name);
 199		break;
 200
 201	case DSO_BINARY_TYPE__KCORE:
 202	case DSO_BINARY_TYPE__GUEST_KCORE:
 203		snprintf(filename, size, "%s", dso->long_name);
 204		break;
 205
 206	default:
 207	case DSO_BINARY_TYPE__KALLSYMS:
 208	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
 209	case DSO_BINARY_TYPE__JAVA_JIT:
 210	case DSO_BINARY_TYPE__BPF_PROG_INFO:
 211	case DSO_BINARY_TYPE__BPF_IMAGE:
 212	case DSO_BINARY_TYPE__OOL:
 213	case DSO_BINARY_TYPE__NOT_FOUND:
 214		ret = -1;
 215		break;
 216	}
 217
 218	return ret;
 219}
 220
 221enum {
 222	COMP_ID__NONE = 0,
 223};
 224
 225static const struct {
 226	const char *fmt;
 227	int (*decompress)(const char *input, int output);
 228	bool (*is_compressed)(const char *input);
 229} compressions[] = {
 230	[COMP_ID__NONE] = { .fmt = NULL, },
 231#ifdef HAVE_ZLIB_SUPPORT
 232	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
 233#endif
 234#ifdef HAVE_LZMA_SUPPORT
 235	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
 236#endif
 237	{ NULL, NULL, NULL },
 238};
 239
 240static int is_supported_compression(const char *ext)
 241{
 242	unsigned i;
 243
 244	for (i = 1; compressions[i].fmt; i++) {
 245		if (!strcmp(ext, compressions[i].fmt))
 246			return i;
 247	}
 248	return COMP_ID__NONE;
 249}
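/*
 * Illustration: with both HAVE_ZLIB_SUPPORT and HAVE_LZMA_SUPPORT defined,
 * is_supported_compression("gz") returns 1 and is_supported_compression("xz")
 * returns 2, while an unknown extension returns COMP_ID__NONE (0). The exact
 * indices depend on which compression libraries are compiled in.
 */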
 250
 251bool is_kernel_module(const char *pathname, int cpumode)
 252{
 253	struct kmod_path m;
 254	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
 255
 256	WARN_ONCE(mode != cpumode,
 257		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
 258		  cpumode);
 259
 260	switch (mode) {
 261	case PERF_RECORD_MISC_USER:
 262	case PERF_RECORD_MISC_HYPERVISOR:
 263	case PERF_RECORD_MISC_GUEST_USER:
 264		return false;
 265	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
 266	default:
 267		if (kmod_path__parse(&m, pathname)) {
 268			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
 269					pathname);
 270			return true;
 271		}
 272	}
 273
 274	return m.kmod;
 275}
 276
 277bool dso__needs_decompress(struct dso *dso)
 278{
 279	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
 280		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 281}
 282
 283int filename__decompress(const char *name, char *pathname,
 284			 size_t len, int comp, int *err)
 285{
 286	char tmpbuf[] = KMOD_DECOMP_NAME;
 287	int fd = -1;
 288
 289	/*
 290	 * We have a proper compression id for the DSO and yet the file
 291	 * behind 'name' can still be a plain uncompressed object.
 292	 *
 293	 * The reason lies in the logic by which we open DSO object files:
 294	 * we try all possible 'debug' objects until we find the
 295	 * data. So even if the DSO is represented by the 'krava.xz' module,
 296	 * we can end up here opening the '~/.debug/....23432432/debug' file,
 297	 * which is not compressed.
 298	 *
 299	 * To keep this transparent, we detect this and return the file
 300	 * descriptor to the uncompressed file.
 301	 */
 302	if (!compressions[comp].is_compressed(name))
 303		return open(name, O_RDONLY);
 304
 305	fd = mkstemp(tmpbuf);
 306	if (fd < 0) {
 307		*err = errno;
 308		return -1;
 309	}
 310
 311	if (compressions[comp].decompress(name, fd)) {
 312		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
 313		close(fd);
 314		fd = -1;
 315	}
 316
 317	if (!pathname || (fd < 0))
 318		unlink(tmpbuf);
 319
 320	if (pathname && (fd >= 0))
 321		strlcpy(pathname, tmpbuf, len);
 322
 323	return fd;
 324}
 325
 326static int decompress_kmodule(struct dso *dso, const char *name,
 327			      char *pathname, size_t len)
 328{
 329	if (!dso__needs_decompress(dso))
 330		return -1;
 331
 332	if (dso->comp == COMP_ID__NONE)
 333		return -1;
 334
 335	return filename__decompress(name, pathname, len, dso->comp,
 336				    &dso->load_errno);
 337}
 338
 339int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
 340{
 341	return decompress_kmodule(dso, name, NULL, 0);
 342}
 343
 344int dso__decompress_kmodule_path(struct dso *dso, const char *name,
 345				 char *pathname, size_t len)
 346{
 347	int fd = decompress_kmodule(dso, name, pathname, len);
 348
 349	close(fd);
 350	return fd >= 0 ? 0 : -1;
 351}
 352
 353/*
 354 * Parses kernel module specified in @path and updates
 355 * @m argument like:
 356 *
 357 *    @comp - true if @path contains supported compression suffix,
 358 *            false otherwise
 359 *    @kmod - true if @path contains '.ko' suffix in right position,
 360 *            false otherwise
 361 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 362 *            of the kernel module without suffixes, otherwise strdup-ed
 363 *            base name of @path
 364 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 365 *            compression suffix
 366 *
 367 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 368 */
 369int __kmod_path__parse(struct kmod_path *m, const char *path,
 370		       bool alloc_name)
 371{
 372	const char *name = strrchr(path, '/');
 373	const char *ext  = strrchr(path, '.');
 374	bool is_simple_name = false;
 375
 376	memset(m, 0x0, sizeof(*m));
 377	name = name ? name + 1 : path;
 378
 379	/*
 380	 * '.' is also a valid character for module name. For example:
 381	 * [aaa.bbb] is a valid module name. '[' should have higher
 382	 * priority than '.ko' suffix.
 383	 *
 384	 * The kernel names are from machine__mmap_name. Such
 385	 * names belong to the kernel itself, not to a kernel module.
 386	 */
 387	if (name[0] == '[') {
 388		is_simple_name = true;
 389		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
 390		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
 391		    (strncmp(name, "[vdso]", 6) == 0) ||
 392		    (strncmp(name, "[vdso32]", 8) == 0) ||
 393		    (strncmp(name, "[vdsox32]", 9) == 0) ||
 394		    (strncmp(name, "[vsyscall]", 10) == 0)) {
 395			m->kmod = false;
 396
 397		} else
 398			m->kmod = true;
 399	}
 400
 401	/* No extension, just return name. */
 402	if ((ext == NULL) || is_simple_name) {
 403		if (alloc_name) {
 404			m->name = strdup(name);
 405			return m->name ? 0 : -ENOMEM;
 406		}
 407		return 0;
 408	}
 409
 410	m->comp = is_supported_compression(ext + 1);
 411	if (m->comp > COMP_ID__NONE)
 412		ext -= 3;
 413
 414	/* Check .ko extension only if there's enough name left. */
 415	if (ext > name)
 416		m->kmod = !strncmp(ext, ".ko", 3);
 417
 418	if (alloc_name) {
 419		if (m->kmod) {
 420			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
 421				return -ENOMEM;
 422		} else {
 423			if (asprintf(&m->name, "%s", name) == -1)
 424				return -ENOMEM;
 425		}
 426
 427		strreplace(m->name, '-', '_');
 428	}
 429
 430	return 0;
 431}
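/*
 * Worked example (illustrative, assuming xz/LZMA support is compiled in):
 * parsing "/lib/modules/.../nvme-core.ko.xz" with alloc_name set yields
 *
 *	m->comp = <index of "xz" in compressions[]>
 *	m->kmod = true
 *	m->name = "[nvme_core]"	(note '-' replaced by '_')
 *
 * while a plain path such as "/usr/bin/ls" (no extension) yields
 * m->kmod = false and m->name = "ls".
 */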
 432
 433void dso__set_module_info(struct dso *dso, struct kmod_path *m,
 434			  struct machine *machine)
 435{
 436	if (machine__is_host(machine))
 437		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
 438	else
 439		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
 440
 441	/* _KMODULE_COMP should be next to _KMODULE */
 442	if (m->kmod && m->comp) {
 443		dso->symtab_type++;
 444		dso->comp = m->comp;
 445	}
 446
 447	dso__set_short_name(dso, strdup(m->name), true);
 448}
 449
 450/*
 451 * Global list of open DSOs and the counter.
 452 */
 453static LIST_HEAD(dso__data_open);
 454static long dso__data_open_cnt;
 455static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
 456
 457static void dso__list_add(struct dso *dso)
 458{
 459	list_add_tail(&dso->data.open_entry, &dso__data_open);
 460	dso__data_open_cnt++;
 461}
 462
 463static void dso__list_del(struct dso *dso)
 464{
 465	list_del_init(&dso->data.open_entry);
 466	WARN_ONCE(dso__data_open_cnt <= 0,
 467		  "DSO data fd counter out of bounds.");
 468	dso__data_open_cnt--;
 469}
 470
 471static void close_first_dso(void);
 472
 473static int do_open(char *name)
 474{
 475	int fd;
 476	char sbuf[STRERR_BUFSIZE];
 477
 478	do {
 479		fd = open(name, O_RDONLY|O_CLOEXEC);
 480		if (fd >= 0)
 481			return fd;
 482
 483		pr_debug("dso open failed: %s\n",
 484			 str_error_r(errno, sbuf, sizeof(sbuf)));
 485		if (!dso__data_open_cnt || errno != EMFILE)
 486			break;
 487
 488		close_first_dso();
 489	} while (1);
 490
 491	return -1;
 492}
 493
 494static int __open_dso(struct dso *dso, struct machine *machine)
 495{
 496	int fd = -EINVAL;
 497	char *root_dir = (char *)"";
 498	char *name = malloc(PATH_MAX);
 499	bool decomp = false;
 500
 501	if (!name)
 502		return -ENOMEM;
 503
 504	mutex_lock(&dso->lock);
 505	if (machine)
 506		root_dir = machine->root_dir;
 507
 508	if (dso__read_binary_type_filename(dso, dso->binary_type,
 509					    root_dir, name, PATH_MAX))
 510		goto out;
 511
 512	if (!is_regular_file(name)) {
 513		char *new_name;
 514
 515		if (errno != ENOENT || dso->nsinfo == NULL)
 516			goto out;
 517
 518		new_name = filename_with_chroot(dso->nsinfo->pid, name);
 519		if (!new_name)
 520			goto out;
 521
 522		free(name);
 523		name = new_name;
 524	}
 525
 526	if (dso__needs_decompress(dso)) {
 527		char newpath[KMOD_DECOMP_LEN];
 528		size_t len = sizeof(newpath);
 529
 530		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
 531			fd = -dso->load_errno;
 532			goto out;
 533		}
 534
 535		decomp = true;
 536		strcpy(name, newpath);
 537	}
 538
 539	fd = do_open(name);
 540
 541	if (decomp)
 542		unlink(name);
 543
 544out:
 545	mutex_unlock(&dso->lock);
 546	free(name);
 547	return fd;
 548}
 549
 550static void check_data_close(void);
 551
 552/**
 553 * open_dso - Open DSO data file
 554 * @dso: dso object
 555 *
 556 * Open @dso's data file descriptor and update the
 557 * list/count of open DSO objects.
 558 */
 559static int open_dso(struct dso *dso, struct machine *machine)
 560{
 561	int fd;
 562	struct nscookie nsc;
 563
 564	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
 565		mutex_lock(&dso->lock);
 566		nsinfo__mountns_enter(dso->nsinfo, &nsc);
 567		mutex_unlock(&dso->lock);
 568	}
 569	fd = __open_dso(dso, machine);
 570	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
 571		nsinfo__mountns_exit(&nsc);
 572
 573	if (fd >= 0) {
 574		dso__list_add(dso);
 575		/*
 576		 * Check if we crossed the allowed number
 577		 * of opened DSOs and close one if needed.
 578		 */
 579		check_data_close();
 580	}
 581
 582	return fd;
 583}
 584
 585static void close_data_fd(struct dso *dso)
 586{
 587	if (dso->data.fd >= 0) {
 588		close(dso->data.fd);
 589		dso->data.fd = -1;
 590		dso->data.file_size = 0;
 591		dso__list_del(dso);
 592	}
 593}
 594
 595/**
 596 * close_dso - Close DSO data file
 597 * @dso: dso object
 598 *
 599 * Close @dso's data file descriptor and update the
 600 * list/count of open DSO objects.
 601 */
 602static void close_dso(struct dso *dso)
 603{
 604	close_data_fd(dso);
 605}
 606
 607static void close_first_dso(void)
 608{
 609	struct dso *dso;
 610
 611	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
 612	close_dso(dso);
 613}
 614
 615static rlim_t get_fd_limit(void)
 616{
 617	struct rlimit l;
 618	rlim_t limit = 0;
 619
 620	/* Allow half of the current open fd limit. */
 621	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
 622		if (l.rlim_cur == RLIM_INFINITY)
 623			limit = l.rlim_cur;
 624		else
 625			limit = l.rlim_cur / 2;
 626	} else {
 627		pr_err("failed to get fd limit\n");
 628		limit = 1;
 629	}
 630
 631	return limit;
 632}
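/*
 * Example: with a soft RLIMIT_NOFILE of 1024, get_fd_limit() returns 512,
 * so may_cache_fd() below allows roughly half of the fd budget to be spent
 * on cached DSO data file descriptors.
 */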
 633
 634static rlim_t fd_limit;
 635
 636/*
 637 * Used only by tests/dso-data.c to reset the environment
 638 * for tests. I don't expect we should change this during
 639 * standard runtime.
 640 */
 641void reset_fd_limit(void)
 642{
 643	fd_limit = 0;
 644}
 645
 646static bool may_cache_fd(void)
 647{
 648	if (!fd_limit)
 649		fd_limit = get_fd_limit();
 650
 651	if (fd_limit == RLIM_INFINITY)
 652		return true;
 653
 654	return fd_limit > (rlim_t) dso__data_open_cnt;
 655}
 656
 657/*
 658 * Check and close LRU dso if we crossed allowed limit
 659 * for opened dso file descriptors. The limit is half
 660 * of the current RLIMIT_NOFILE soft limit.
 661 */
 662static void check_data_close(void)
 663{
 664	bool cache_fd = may_cache_fd();
 665
 666	if (!cache_fd)
 667		close_first_dso();
 668}
 669
 670/**
 671 * dso__data_close - Close DSO data file
 672 * @dso: dso object
 673 *
 674 * External interface to close @dso's data file descriptor.
 675 */
 676void dso__data_close(struct dso *dso)
 677{
 678	pthread_mutex_lock(&dso__data_open_lock);
 679	close_dso(dso);
 680	pthread_mutex_unlock(&dso__data_open_lock);
 681}
 682
 683static void try_to_open_dso(struct dso *dso, struct machine *machine)
 684{
 685	enum dso_binary_type binary_type_data[] = {
 686		DSO_BINARY_TYPE__BUILD_ID_CACHE,
 687		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
 688		DSO_BINARY_TYPE__NOT_FOUND,
 689	};
 690	int i = 0;
 691
 692	if (dso->data.fd >= 0)
 693		return;
 694
 695	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
 696		dso->data.fd = open_dso(dso, machine);
 697		goto out;
 698	}
 699
 700	do {
 701		dso->binary_type = binary_type_data[i++];
 702
 703		dso->data.fd = open_dso(dso, machine);
 704		if (dso->data.fd >= 0)
 705			goto out;
 706
 707	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
 708out:
 709	if (dso->data.fd >= 0)
 710		dso->data.status = DSO_DATA_STATUS_OK;
 711	else
 712		dso->data.status = DSO_DATA_STATUS_ERROR;
 713}
 714
 715/**
 716 * dso__data_get_fd - Get dso's data file descriptor
 717 * @dso: dso object
 718 * @machine: machine object
 719 *
 720 * External interface to find dso's file, open it and
 721 * return its file descriptor.  It should be paired with
 722 * dso__data_put_fd() if it returns a non-negative value.
 723 */
 724int dso__data_get_fd(struct dso *dso, struct machine *machine)
 725{
 726	if (dso->data.status == DSO_DATA_STATUS_ERROR)
 727		return -1;
 728
 729	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
 730		return -1;
 731
 732	try_to_open_dso(dso, machine);
 733
 734	if (dso->data.fd < 0)
 735		pthread_mutex_unlock(&dso__data_open_lock);
 736
 737	return dso->data.fd;
 738}
 739
 740void dso__data_put_fd(struct dso *dso __maybe_unused)
 741{
 742	pthread_mutex_unlock(&dso__data_open_lock);
 743}
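/*
 * Usage sketch (illustrative): dso__data_get_fd()/dso__data_put_fd() are
 * meant to bracket use of the file descriptor, as dso__type() further
 * below does:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... use fd, e.g. dso__type_fd(fd) ...
 *		dso__data_put_fd(dso);
 *	}
 */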
 744
 745bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 746{
 747	u32 flag = 1 << by;
 748
 749	if (dso->data.status_seen & flag)
 750		return true;
 751
 752	dso->data.status_seen |= flag;
 753
 754	return false;
 755}
 756
 757#ifdef HAVE_LIBBPF_SUPPORT
 758static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
 759{
 760	struct bpf_prog_info_node *node;
 761	ssize_t size = DSO__DATA_CACHE_SIZE;
 762	u64 len;
 763	u8 *buf;
 764
 765	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 766	if (!node || !node->info_linear) {
 767		dso->data.status = DSO_DATA_STATUS_ERROR;
 768		return -1;
 769	}
 770
 771	len = node->info_linear->info.jited_prog_len;
 772	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
 773
 774	if (offset >= len)
 775		return -1;
 776
 777	size = (ssize_t)min(len - offset, (u64)size);
 778	memcpy(data, buf + offset, size);
 779	return size;
 780}
 781
 782static int bpf_size(struct dso *dso)
 783{
 784	struct bpf_prog_info_node *node;
 785
 786	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 787	if (!node || !node->info_linear) {
 788		dso->data.status = DSO_DATA_STATUS_ERROR;
 789		return -1;
 790	}
 791
 792	dso->data.file_size = node->info_linear->info.jited_prog_len;
 793	return 0;
 794}
 795#endif // HAVE_LIBBPF_SUPPORT
 796
 797static void
 798dso_cache__free(struct dso *dso)
 799{
 800	struct rb_root *root = &dso->data.cache;
 801	struct rb_node *next = rb_first(root);
 802
 803	mutex_lock(&dso->lock);
 804	while (next) {
 805		struct dso_cache *cache;
 806
 807		cache = rb_entry(next, struct dso_cache, rb_node);
 808		next = rb_next(&cache->rb_node);
 809		rb_erase(&cache->rb_node, root);
 810		free(cache);
 811	}
 812	mutex_unlock(&dso->lock);
 813}
 814
 815static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 816{
 817	const struct rb_root *root = &dso->data.cache;
 818	struct rb_node * const *p = &root->rb_node;
 819	const struct rb_node *parent = NULL;
 820	struct dso_cache *cache;
 821
 822	while (*p != NULL) {
 823		u64 end;
 824
 825		parent = *p;
 826		cache = rb_entry(parent, struct dso_cache, rb_node);
 827		end = cache->offset + DSO__DATA_CACHE_SIZE;
 828
 829		if (offset < cache->offset)
 830			p = &(*p)->rb_left;
 831		else if (offset >= end)
 832			p = &(*p)->rb_right;
 833		else
 834			return cache;
 835	}
 836
 837	return NULL;
 838}
 839
 840static struct dso_cache *
 841dso_cache__insert(struct dso *dso, struct dso_cache *new)
 842{
 843	struct rb_root *root = &dso->data.cache;
 844	struct rb_node **p = &root->rb_node;
 845	struct rb_node *parent = NULL;
 846	struct dso_cache *cache;
 847	u64 offset = new->offset;
 848
 849	mutex_lock(&dso->lock);
 850	while (*p != NULL) {
 851		u64 end;
 852
 853		parent = *p;
 854		cache = rb_entry(parent, struct dso_cache, rb_node);
 855		end = cache->offset + DSO__DATA_CACHE_SIZE;
 856
 857		if (offset < cache->offset)
 858			p = &(*p)->rb_left;
 859		else if (offset >= end)
 860			p = &(*p)->rb_right;
 861		else
 862			goto out;
 863	}
 864
 865	rb_link_node(&new->rb_node, parent, p);
 866	rb_insert_color(&new->rb_node, root);
 867
 868	cache = NULL;
 869out:
 870	mutex_unlock(&dso->lock);
 871	return cache;
 872}
 873
 874static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
 875				 u64 size, bool out)
 876{
 877	u64 cache_offset = offset - cache->offset;
 878	u64 cache_size   = min(cache->size - cache_offset, size);
 879
 880	if (out)
 881		memcpy(data, cache->data + cache_offset, cache_size);
 882	else
 883		memcpy(cache->data + cache_offset, data, cache_size);
 884	return cache_size;
 885}
 886
 887static ssize_t file_read(struct dso *dso, struct machine *machine,
 888			 u64 offset, char *data)
 889{
 890	ssize_t ret;
 891
 892	pthread_mutex_lock(&dso__data_open_lock);
 893
 894	/*
 895	 * dso->data.fd might be closed if another thread opened another
 896	 * file (dso) due to open file limit (RLIMIT_NOFILE).
 897	 */
 898	try_to_open_dso(dso, machine);
 899
 900	if (dso->data.fd < 0) {
 901		dso->data.status = DSO_DATA_STATUS_ERROR;
 902		ret = -errno;
 903		goto out;
 904	}
 905
 906	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
 907out:
 908	pthread_mutex_unlock(&dso__data_open_lock);
 909	return ret;
 910}
 911
 912static struct dso_cache *dso_cache__populate(struct dso *dso,
 913					     struct machine *machine,
 914					     u64 offset, ssize_t *ret)
 915{
 916	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 917	struct dso_cache *cache;
 918	struct dso_cache *old;
 919
 920	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
 921	if (!cache) {
 922		*ret = -ENOMEM;
 923		return NULL;
 924	}
 925#ifdef HAVE_LIBBPF_SUPPORT
 926	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
 927		*ret = bpf_read(dso, cache_offset, cache->data);
 928	else
 929#endif
 930	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
 931		*ret = DSO__DATA_CACHE_SIZE;
 932	else
 933		*ret = file_read(dso, machine, cache_offset, cache->data);
 934
 935	if (*ret <= 0) {
 936		free(cache);
 937		return NULL;
 938	}
 939
 940	cache->offset = cache_offset;
 941	cache->size   = *ret;
 942
 943	old = dso_cache__insert(dso, cache);
 944	if (old) {
 945		/* we lose the race */
 946		free(cache);
 947		cache = old;
 948	}
 949
 950	return cache;
 951}
 952
 953static struct dso_cache *dso_cache__find(struct dso *dso,
 954					 struct machine *machine,
 955					 u64 offset,
 956					 ssize_t *ret)
 957{
 958	struct dso_cache *cache = __dso_cache__find(dso, offset);
 959
 960	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
 961}
 962
 963static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
 964			    u64 offset, u8 *data, ssize_t size, bool out)
 965{
 966	struct dso_cache *cache;
 967	ssize_t ret = 0;
 968
 969	cache = dso_cache__find(dso, machine, offset, &ret);
 970	if (!cache)
 971		return ret;
 972
 973	return dso_cache__memcpy(cache, offset, data, size, out);
 974}
 975
 976/*
 977 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 978 * in the rb_tree. Any read of already cached data is served
 979 * from the cache. Writes update the cache only, not the backing file.
 980 */
 981static ssize_t cached_io(struct dso *dso, struct machine *machine,
 982			 u64 offset, u8 *data, ssize_t size, bool out)
 983{
 984	ssize_t r = 0;
 985	u8 *p = data;
 986
 987	do {
 988		ssize_t ret;
 989
 990		ret = dso_cache_io(dso, machine, offset, p, size, out);
 991		if (ret < 0)
 992			return ret;
 993
 994		/* Reached EOF, return what we have. */
 995		if (!ret)
 996			break;
 997
 998		BUG_ON(ret > size);
 999
1000		r      += ret;
1001		p      += ret;
1002		offset += ret;
1003		size   -= ret;
1004
1005	} while (size);
1006
1007	return r;
1008}
1009
1010static int file_size(struct dso *dso, struct machine *machine)
1011{
1012	int ret = 0;
1013	struct stat st;
1014	char sbuf[STRERR_BUFSIZE];
1015
1016	pthread_mutex_lock(&dso__data_open_lock);
1017
1018	/*
1019	 * dso->data.fd might be closed if another thread opened another
1020	 * file (dso) due to open file limit (RLIMIT_NOFILE).
1021	 */
1022	try_to_open_dso(dso, machine);
1023
1024	if (dso->data.fd < 0) {
1025		ret = -errno;
1026		dso->data.status = DSO_DATA_STATUS_ERROR;
1027		goto out;
1028	}
1029
1030	if (fstat(dso->data.fd, &st) < 0) {
1031		ret = -errno;
1032		pr_err("dso cache fstat failed: %s\n",
1033		       str_error_r(errno, sbuf, sizeof(sbuf)));
1034		dso->data.status = DSO_DATA_STATUS_ERROR;
1035		goto out;
1036	}
1037	dso->data.file_size = st.st_size;
1038
1039out:
1040	pthread_mutex_unlock(&dso__data_open_lock);
1041	return ret;
1042}
1043
1044int dso__data_file_size(struct dso *dso, struct machine *machine)
1045{
1046	if (dso->data.file_size)
1047		return 0;
1048
1049	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1050		return -1;
1051#ifdef HAVE_LIBBPF_SUPPORT
1052	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
1053		return bpf_size(dso);
1054#endif
1055	return file_size(dso, machine);
1056}
1057
1058/**
1059 * dso__data_size - Return dso data size
1060 * @dso: dso object
1061 * @machine: machine object
1062 *
1063 * Return: dso data size
1064 */
1065off_t dso__data_size(struct dso *dso, struct machine *machine)
1066{
1067	if (dso__data_file_size(dso, machine))
1068		return -1;
1069
1070	/* For now just estimate that the dso data size is close to the file size */
1071	return dso->data.file_size;
1072}
1073
1074static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1075				      u64 offset, u8 *data, ssize_t size,
1076				      bool out)
1077{
1078	if (dso__data_file_size(dso, machine))
1079		return -1;
1080
1081	/* Check the offset sanity. */
1082	if (offset > dso->data.file_size)
1083		return -1;
1084
1085	if (offset + size < offset)
1086		return -1;
1087
1088	return cached_io(dso, machine, offset, data, size, out);
1089}
1090
1091/**
1092 * dso__data_read_offset - Read data from dso file offset
1093 * @dso: dso object
1094 * @machine: machine object
1095 * @offset: file offset
1096 * @data: buffer to store data
1097 * @size: size of the @data buffer
1098 *
1099 * External interface to read data from dso file offset. Open
1100 * dso data file and use cached_io to get the data.
1101 */
1102ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1103			      u64 offset, u8 *data, ssize_t size)
1104{
1105	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1106		return -1;
1107
1108	return data_read_write_offset(dso, machine, offset, data, size, true);
1109}
1110
1111/**
1112 * dso__data_read_addr - Read data from dso address
1113 * @dso: dso object
1114 * @machine: machine object
1115 * @addr: virtual memory address
1116 * @data: buffer to store data
1117 * @size: size of the @data buffer
1118 *
1119 * External interface to read data from dso address.
1120 */
1121ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1122			    struct machine *machine, u64 addr,
1123			    u8 *data, ssize_t size)
1124{
1125	u64 offset = map->map_ip(map, addr);
1126	return dso__data_read_offset(dso, machine, offset, data, size);
1127}
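/*
 * Usage sketch (illustrative; 'al' is assumed to be a resolved
 * struct addr_location and 'sample' a struct perf_sample):
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(al.map->dso, al.map, machine,
 *					sample->ip, buf, sizeof(buf));
 *	if (n > 0)
 *		... the first n bytes at sample->ip are now in buf ...
 */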
1128
1129/**
1130 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1131 * @dso: dso object
1132 * @machine: machine object
1133 * @offset: file offset
1134 * @data: buffer to write
1135 * @size: size of the @data buffer
1136 *
1137 * Write into the dso file data cache, but do not change the file itself.
1138 */
1139ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1140				   u64 offset, const u8 *data_in, ssize_t size)
1141{
1142	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1143
1144	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1145		return -1;
1146
1147	return data_read_write_offset(dso, machine, offset, data, size, false);
1148}
1149
1150/**
1151 * dso__data_write_cache_addr - Write data to dso data cache at dso address
1152 * @dso: dso object
1153 * @machine: machine object
1154 * @addr: virtual memory address
1155 * @data: buffer to write
1156 * @size: size of the @data buffer
1157 *
1158 * External interface to write into the dso file data cache, but do not change
1159 * the file itself.
1160 */
1161ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1162				   struct machine *machine, u64 addr,
1163				   const u8 *data, ssize_t size)
1164{
1165	u64 offset = map->map_ip(map, addr);
1166	return dso__data_write_cache_offs(dso, machine, offset, data, size);
1167}
1168
1169struct map *dso__new_map(const char *name)
1170{
1171	struct map *map = NULL;
1172	struct dso *dso = dso__new(name);
1173
1174	if (dso) {
1175		map = map__new2(0, dso);
1176		dso__put(dso);
1177	}
1178
1179	return map;
1180}
1181
1182struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1183				    const char *short_name, int dso_type)
1184{
1185	/*
1186	 * The kernel dso could be created by build_id processing.
1187	 */
1188	struct dso *dso = machine__findnew_dso(machine, name);
1189
1190	/*
1191	 * We need to run this in all cases, since during the build_id
1192	 * processing we had no idea this was the kernel dso.
1193	 */
1194	if (dso != NULL) {
1195		dso__set_short_name(dso, short_name, false);
1196		dso->kernel = dso_type;
1197	}
1198
1199	return dso;
1200}
1201
1202static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
1203{
1204	struct rb_root *root = dso->root;
1205
1206	if (name == NULL)
1207		return;
1208
1209	if (dso->long_name_allocated)
1210		free((char *)dso->long_name);
1211
1212	if (root) {
1213		rb_erase(&dso->rb_node, root);
1214		/*
1215		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
1216		 * add it back, so a clean removal is required here.
1217		 */
1218		RB_CLEAR_NODE(&dso->rb_node);
1219		dso->root = NULL;
1220	}
1221
1222	dso->long_name		 = name;
1223	dso->long_name_len	 = strlen(name);
1224	dso->long_name_allocated = name_allocated;
1225
1226	if (root)
1227		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
1228}
1229
1230void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1231{
1232	dso__set_long_name_id(dso, name, NULL, name_allocated);
1233}
1234
1235void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1236{
1237	if (name == NULL)
1238		return;
1239
1240	if (dso->short_name_allocated)
1241		free((char *)dso->short_name);
1242
1243	dso->short_name		  = name;
1244	dso->short_name_len	  = strlen(name);
1245	dso->short_name_allocated = name_allocated;
1246}
1247
1248int dso__name_len(const struct dso *dso)
1249{
1250	if (!dso)
1251		return strlen("[unknown]");
1252	if (verbose > 0)
1253		return dso->long_name_len;
1254
1255	return dso->short_name_len;
1256}
1257
1258bool dso__loaded(const struct dso *dso)
1259{
1260	return dso->loaded;
1261}
1262
1263bool dso__sorted_by_name(const struct dso *dso)
1264{
1265	return dso->sorted_by_name;
1266}
1267
1268void dso__set_sorted_by_name(struct dso *dso)
1269{
1270	dso->sorted_by_name = true;
1271}
1272
1273struct dso *dso__new_id(const char *name, struct dso_id *id)
1274{
1275	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1276
1277	if (dso != NULL) {
1278		strcpy(dso->name, name);
1279		if (id)
1280			dso->id = *id;
1281		dso__set_long_name_id(dso, dso->name, id, false);
1282		dso__set_short_name(dso, dso->name, false);
1283		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1284		dso->data.cache = RB_ROOT;
1285		dso->inlined_nodes = RB_ROOT_CACHED;
1286		dso->srclines = RB_ROOT_CACHED;
1287		dso->data.fd = -1;
1288		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1289		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1290		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1291		dso->is_64_bit = (sizeof(void *) == 8);
1292		dso->loaded = 0;
1293		dso->rel = 0;
1294		dso->sorted_by_name = 0;
1295		dso->has_build_id = 0;
1296		dso->has_srcline = 1;
1297		dso->a2l_fails = 1;
1298		dso->kernel = DSO_SPACE__USER;
1299		dso->needs_swap = DSO_SWAP__UNSET;
1300		dso->comp = COMP_ID__NONE;
1301		RB_CLEAR_NODE(&dso->rb_node);
1302		dso->root = NULL;
1303		INIT_LIST_HEAD(&dso->node);
1304		INIT_LIST_HEAD(&dso->data.open_entry);
1305		mutex_init(&dso->lock);
1306		refcount_set(&dso->refcnt, 1);
1307	}
1308
1309	return dso;
1310}
1311
1312struct dso *dso__new(const char *name)
1313{
1314	return dso__new_id(name, NULL);
1315}
1316
1317void dso__delete(struct dso *dso)
1318{
1319	if (!RB_EMPTY_NODE(&dso->rb_node))
1320		pr_err("DSO %s is still in rbtree when being deleted!\n",
1321		       dso->long_name);
1322
1323	/* free inlines first, as they reference symbols */
1324	inlines__tree_delete(&dso->inlined_nodes);
1325	srcline__tree_delete(&dso->srclines);
1326	symbols__delete(&dso->symbols);
1327
1328	if (dso->short_name_allocated) {
1329		zfree((char **)&dso->short_name);
1330		dso->short_name_allocated = false;
1331	}
1332
1333	if (dso->long_name_allocated) {
1334		zfree((char **)&dso->long_name);
1335		dso->long_name_allocated = false;
1336	}
1337
1338	dso__data_close(dso);
1339	auxtrace_cache__free(dso->auxtrace_cache);
1340	dso_cache__free(dso);
1341	dso__free_a2l(dso);
1342	zfree(&dso->symsrc_filename);
1343	nsinfo__zput(dso->nsinfo);
1344	mutex_destroy(&dso->lock);
1345	free(dso);
1346}
1347
1348struct dso *dso__get(struct dso *dso)
1349{
1350	if (dso)
1351		refcount_inc(&dso->refcnt);
1352	return dso;
1353}
1354
1355void dso__put(struct dso *dso)
1356{
1357	if (dso && refcount_dec_and_test(&dso->refcnt))
1358		dso__delete(dso);
1359}
1360
1361void dso__set_build_id(struct dso *dso, struct build_id *bid)
1362{
1363	dso->bid = *bid;
1364	dso->has_build_id = 1;
1365}
1366
1367bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1368{
1369	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
1370		/*
1371		 * For backward compatibility, a build-id is allowed to have
1372		 * trailing zeros.
1373		 */
1374		return !memcmp(dso->bid.data, bid->data, bid->size) &&
1375			!memchr_inv(&dso->bid.data[bid->size], 0,
1376				    dso->bid.size - bid->size);
1377	}
1378
1379	return dso->bid.size == bid->size &&
1380	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
1381}
1382
1383void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1384{
1385	char path[PATH_MAX];
1386
1387	if (machine__is_default_guest(machine))
1388		return;
1389	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1390	if (sysfs__read_build_id(path, &dso->bid) == 0)
1391		dso->has_build_id = true;
1392}
1393
1394int dso__kernel_module_get_build_id(struct dso *dso,
1395				    const char *root_dir)
1396{
1397	char filename[PATH_MAX];
1398	/*
1399	 * kernel module short names are of the form "[module]" and
1400	 * we need just "module" here.
1401	 */
1402	const char *name = dso->short_name + 1;
1403
1404	snprintf(filename, sizeof(filename),
1405		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1406		 root_dir, (int)strlen(name) - 1, name);
1407
1408	if (sysfs__read_build_id(filename, &dso->bid) == 0)
1409		dso->has_build_id = true;
1410
1411	return 0;
1412}
1413
1414static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1415{
1416	char sbuild_id[SBUILD_ID_SIZE];
1417
1418	build_id__sprintf(&dso->bid, sbuild_id);
1419	return fprintf(fp, "%s", sbuild_id);
1420}
1421
1422size_t dso__fprintf(struct dso *dso, FILE *fp)
1423{
1424	struct rb_node *nd;
1425	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1426
1427	if (dso->short_name != dso->long_name)
1428		ret += fprintf(fp, "%s, ", dso->long_name);
1429	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1430	ret += dso__fprintf_buildid(dso, fp);
1431	ret += fprintf(fp, ")\n");
1432	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1433		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1434		ret += symbol__fprintf(pos, fp);
1435	}
1436
1437	return ret;
1438}
1439
1440enum dso_type dso__type(struct dso *dso, struct machine *machine)
1441{
1442	int fd;
1443	enum dso_type type = DSO__TYPE_UNKNOWN;
1444
1445	fd = dso__data_get_fd(dso, machine);
1446	if (fd >= 0) {
1447		type = dso__type_fd(fd);
1448		dso__data_put_fd(dso);
1449	}
1450
1451	return type;
1452}
1453
1454int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1455{
1456	int idx, errnum = dso->load_errno;
1457	/*
1458	 * This must have the same ordering as the enum dso_load_errno.
1459	 */
1460	static const char *dso_load__error_str[] = {
1461	"Internal tools/perf/ library error",
1462	"Invalid ELF file",
1463	"Can not read build id",
1464	"Mismatching build id",
1465	"Decompression failure",
1466	};
1467
1468	BUG_ON(buflen == 0);
1469
1470	if (errnum >= 0) {
1471		const char *err = str_error_r(errnum, buf, buflen);
1472
1473		if (err != buf)
1474			scnprintf(buf, buflen, "%s", err);
1475
1476		return 0;
1477	}
1478
1479	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1480		return -1;
1481
1482	idx = errnum - __DSO_LOAD_ERRNO__START;
1483	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1484	return 0;
1485}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2#include <asm/bug.h>
   3#include <linux/kernel.h>
   4#include <linux/string.h>
   5#include <linux/zalloc.h>
   6#include <sys/time.h>
   7#include <sys/resource.h>
   8#include <sys/types.h>
   9#include <sys/stat.h>
  10#include <unistd.h>
  11#include <errno.h>
  12#include <fcntl.h>
  13#include <stdlib.h>
 
  14#include <bpf/libbpf.h>
  15#include "bpf-event.h"
 
 
  16#include "compress.h"
  17#include "env.h"
  18#include "namespaces.h"
  19#include "path.h"
  20#include "map.h"
  21#include "symbol.h"
  22#include "srcline.h"
  23#include "dso.h"
  24#include "dsos.h"
  25#include "machine.h"
  26#include "auxtrace.h"
  27#include "util.h" /* O_CLOEXEC for older systems */
  28#include "debug.h"
  29#include "string2.h"
  30#include "vdso.h"
  31
  32static const char * const debuglink_paths[] = {
  33	"%.0s%s",
  34	"%s/%s",
  35	"%s/.debug/%s",
  36	"/usr/lib/debug%s/%s"
  37};
  38
  39char dso__symtab_origin(const struct dso *dso)
  40{
  41	static const char origin[] = {
  42		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
  43		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
  44		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
  45		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
  46		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
  47		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
  48		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
  49		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
  50		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
  51		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
  52		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
  53		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
  54		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
  55		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
  56		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
  57		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
  58		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
  59		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
  60	};
  61
  62	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
  63		return '!';
  64	return origin[dso->symtab_type];
  65}
  66
  67int dso__read_binary_type_filename(const struct dso *dso,
  68				   enum dso_binary_type type,
  69				   char *root_dir, char *filename, size_t size)
  70{
  71	char build_id_hex[SBUILD_ID_SIZE];
  72	int ret = 0;
  73	size_t len;
  74
  75	switch (type) {
  76	case DSO_BINARY_TYPE__DEBUGLINK:
  77	{
  78		const char *last_slash;
  79		char dso_dir[PATH_MAX];
  80		char symfile[PATH_MAX];
  81		unsigned int i;
  82
  83		len = __symbol__join_symfs(filename, size, dso->long_name);
  84		last_slash = filename + len;
  85		while (last_slash != filename && *last_slash != '/')
  86			last_slash--;
  87
  88		strncpy(dso_dir, filename, last_slash - filename);
  89		dso_dir[last_slash-filename] = '\0';
  90
  91		if (!is_regular_file(filename)) {
  92			ret = -1;
  93			break;
  94		}
  95
  96		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
  97		if (ret)
  98			break;
  99
 100		/* Check predefined locations where debug file might reside */
 101		ret = -1;
 102		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
 103			snprintf(filename, size,
 104					debuglink_paths[i], dso_dir, symfile);
 105			if (is_regular_file(filename)) {
 106				ret = 0;
 107				break;
 108			}
 109		}
 110
 111		break;
 112	}
 113	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
 114		if (dso__build_id_filename(dso, filename, size, false) == NULL)
 115			ret = -1;
 116		break;
 117
 118	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
 119		if (dso__build_id_filename(dso, filename, size, true) == NULL)
 120			ret = -1;
 121		break;
 122
 123	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
 124		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 125		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
 126		break;
 127
 128	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
 129		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 130		snprintf(filename + len, size - len, "%s", dso->long_name);
 131		break;
 132
 133	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
 134		/*
 135		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
 136		 * /usr/lib/debug/lib when it is expected to be in
 137		 * /usr/lib/debug/usr/lib
 138		 */
 139		if (strlen(dso->long_name) < 9 ||
 140		    strncmp(dso->long_name, "/usr/lib/", 9)) {
 141			ret = -1;
 142			break;
 143		}
 144		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 145		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
 146		break;
 147
 148	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
 149	{
 150		const char *last_slash;
 151		size_t dir_size;
 152
 153		last_slash = dso->long_name + dso->long_name_len;
 154		while (last_slash != dso->long_name && *last_slash != '/')
 155			last_slash--;
 156
 157		len = __symbol__join_symfs(filename, size, "");
 158		dir_size = last_slash - dso->long_name + 2;
 159		if (dir_size > (size - len)) {
 160			ret = -1;
 161			break;
 162		}
 163		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
 164		len += scnprintf(filename + len , size - len, ".debug%s",
 165								last_slash);
 166		break;
 167	}
 168
 169	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
 170		if (!dso->has_build_id) {
 171			ret = -1;
 172			break;
 173		}
 174
 175		build_id__sprintf(dso->build_id,
 176				  sizeof(dso->build_id),
 177				  build_id_hex);
 178		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
 179		snprintf(filename + len, size - len, "%.2s/%s.debug",
 180			 build_id_hex, build_id_hex + 2);
 181		break;
 182
 183	case DSO_BINARY_TYPE__VMLINUX:
 184	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 185	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
 186		__symbol__join_symfs(filename, size, dso->long_name);
 187		break;
 188
 189	case DSO_BINARY_TYPE__GUEST_KMODULE:
 190	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
 191		path__join3(filename, size, symbol_conf.symfs,
 192			    root_dir, dso->long_name);
 193		break;
 194
 195	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
 196	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 197		__symbol__join_symfs(filename, size, dso->long_name);
 198		break;
 199
 200	case DSO_BINARY_TYPE__KCORE:
 201	case DSO_BINARY_TYPE__GUEST_KCORE:
 202		snprintf(filename, size, "%s", dso->long_name);
 203		break;
 204
 205	default:
 206	case DSO_BINARY_TYPE__KALLSYMS:
 207	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
 208	case DSO_BINARY_TYPE__JAVA_JIT:
 209	case DSO_BINARY_TYPE__BPF_PROG_INFO:
 210	case DSO_BINARY_TYPE__BPF_IMAGE:
 211	case DSO_BINARY_TYPE__OOL:
 212	case DSO_BINARY_TYPE__NOT_FOUND:
 213		ret = -1;
 214		break;
 215	}
 216
 217	return ret;
 218}
 219
 220enum {
 221	COMP_ID__NONE = 0,
 222};
 223
 224static const struct {
 225	const char *fmt;
 226	int (*decompress)(const char *input, int output);
 227	bool (*is_compressed)(const char *input);
 228} compressions[] = {
 229	[COMP_ID__NONE] = { .fmt = NULL, },
 230#ifdef HAVE_ZLIB_SUPPORT
 231	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
 232#endif
 233#ifdef HAVE_LZMA_SUPPORT
 234	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
 235#endif
 236	{ NULL, NULL, NULL },
 237};
 238
 239static int is_supported_compression(const char *ext)
 240{
 241	unsigned i;
 242
 243	for (i = 1; compressions[i].fmt; i++) {
 244		if (!strcmp(ext, compressions[i].fmt))
 245			return i;
 246	}
 247	return COMP_ID__NONE;
 248}
 249
 250bool is_kernel_module(const char *pathname, int cpumode)
 251{
 252	struct kmod_path m;
 253	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
 254
 255	WARN_ONCE(mode != cpumode,
 256		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
 257		  cpumode);
 258
 259	switch (mode) {
 260	case PERF_RECORD_MISC_USER:
 261	case PERF_RECORD_MISC_HYPERVISOR:
 262	case PERF_RECORD_MISC_GUEST_USER:
 263		return false;
 264	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
 265	default:
 266		if (kmod_path__parse(&m, pathname)) {
 267			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
 268					pathname);
 269			return true;
 270		}
 271	}
 272
 273	return m.kmod;
 274}
 275
 276bool dso__needs_decompress(struct dso *dso)
 277{
 278	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
 279		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 280}
 281
 282static int decompress_kmodule(struct dso *dso, const char *name,
 283			      char *pathname, size_t len)
 284{
 285	char tmpbuf[] = KMOD_DECOMP_NAME;
 286	int fd = -1;
 287
 288	if (!dso__needs_decompress(dso))
 289		return -1;
 290
 291	if (dso->comp == COMP_ID__NONE)
 292		return -1;
 293
 294	/*
 295	 * We have proper compression id for DSO and yet the file
 296	 * behind the 'name' can still be plain uncompressed object.
 297	 *
 298	 * The reason is behind the logic we open the DSO object files,
 299	 * when we try all possible 'debug' objects until we find the
 300	 * data. So even if the DSO is represented by 'krava.xz' module,
 301	 * we can end up here opening ~/.debug/....23432432/debug' file
 302	 * which is not compressed.
 303	 *
 304	 * To keep this transparent, we detect this and return the file
 305	 * descriptor to the uncompressed file.
 306	 */
 307	if (!compressions[dso->comp].is_compressed(name))
 308		return open(name, O_RDONLY);
 309
 310	fd = mkstemp(tmpbuf);
 311	if (fd < 0) {
 312		dso->load_errno = errno;
 313		return -1;
 314	}
 315
 316	if (compressions[dso->comp].decompress(name, fd)) {
 317		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
 318		close(fd);
 319		fd = -1;
 320	}
 321
 322	if (!pathname || (fd < 0))
 323		unlink(tmpbuf);
 324
 325	if (pathname && (fd >= 0))
 326		strlcpy(pathname, tmpbuf, len);
 327
 328	return fd;
 329}
 330
 
 
 
 
 
 
 
 
 
 
 
 
 
 331int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
 332{
 333	return decompress_kmodule(dso, name, NULL, 0);
 334}
 335
 336int dso__decompress_kmodule_path(struct dso *dso, const char *name,
 337				 char *pathname, size_t len)
 338{
 339	int fd = decompress_kmodule(dso, name, pathname, len);
 340
 341	close(fd);
 342	return fd >= 0 ? 0 : -1;
 343}
 344
 345/*
 346 * Parses kernel module specified in @path and updates
 347 * @m argument like:
 348 *
 349 *    @comp - true if @path contains supported compression suffix,
 350 *            false otherwise
 351 *    @kmod - true if @path contains '.ko' suffix in right position,
 352 *            false otherwise
 353 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 354 *            of the kernel module without suffixes, otherwise strudup-ed
 355 *            base name of @path
 356 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 357 *            the compression suffix
 358 *
 359 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 360 */
 361int __kmod_path__parse(struct kmod_path *m, const char *path,
 362		       bool alloc_name)
 363{
 364	const char *name = strrchr(path, '/');
 365	const char *ext  = strrchr(path, '.');
 366	bool is_simple_name = false;
 367
 368	memset(m, 0x0, sizeof(*m));
 369	name = name ? name + 1 : path;
 370
 371	/*
 372	 * '.' is also a valid character for module name. For example:
 373	 * [aaa.bbb] is a valid module name. '[' should have higher
 374	 * priority than '.ko' suffix.
 375	 *
 376	 * The kernel names are from machine__mmap_name. Such
 377	 * name should belong to kernel itself, not kernel module.
 378	 */
 379	if (name[0] == '[') {
 380		is_simple_name = true;
 381		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
 382		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
 383		    (strncmp(name, "[vdso]", 6) == 0) ||
 384		    (strncmp(name, "[vdso32]", 8) == 0) ||
 385		    (strncmp(name, "[vdsox32]", 9) == 0) ||
 386		    (strncmp(name, "[vsyscall]", 10) == 0)) {
 387			m->kmod = false;
 388
 389		} else
 390			m->kmod = true;
 391	}
 392
 393	/* No extension, just return name. */
 394	if ((ext == NULL) || is_simple_name) {
 395		if (alloc_name) {
 396			m->name = strdup(name);
 397			return m->name ? 0 : -ENOMEM;
 398		}
 399		return 0;
 400	}
 401
 402	m->comp = is_supported_compression(ext + 1);
 403	if (m->comp > COMP_ID__NONE)
 404		ext -= 3;
 405
 406	/* Check .ko extension only if there's enough name left. */
 407	if (ext > name)
 408		m->kmod = !strncmp(ext, ".ko", 3);
 409
 410	if (alloc_name) {
 411		if (m->kmod) {
 412			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
 413				return -ENOMEM;
 414		} else {
 415			if (asprintf(&m->name, "%s", name) == -1)
 416				return -ENOMEM;
 417		}
 418
 419		strreplace(m->name, '-', '_');
 420	}
 421
 422	return 0;
 423}
 424
 425void dso__set_module_info(struct dso *dso, struct kmod_path *m,
 426			  struct machine *machine)
 427{
 428	if (machine__is_host(machine))
 429		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
 430	else
 431		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
 432
 433	/* _KMODULE_COMP should be next to _KMODULE */
 434	if (m->kmod && m->comp) {
 435		dso->symtab_type++;
 436		dso->comp = m->comp;
 437	}
 438
 439	dso__set_short_name(dso, strdup(m->name), true);
 440}
 441
 442/*
 443 * Global list of open DSOs and the counter.
 444 */
 445static LIST_HEAD(dso__data_open);
 446static long dso__data_open_cnt;
 447static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
 448
 449static void dso__list_add(struct dso *dso)
 450{
 451	list_add_tail(&dso->data.open_entry, &dso__data_open);
 452	dso__data_open_cnt++;
 453}
 454
 455static void dso__list_del(struct dso *dso)
 456{
 457	list_del_init(&dso->data.open_entry);
 458	WARN_ONCE(dso__data_open_cnt <= 0,
 459		  "DSO data fd counter out of bounds.");
 460	dso__data_open_cnt--;
 461}
 462
 463static void close_first_dso(void);
 464
 465static int do_open(char *name)
 466{
 467	int fd;
 468	char sbuf[STRERR_BUFSIZE];
 469
 470	do {
 471		fd = open(name, O_RDONLY|O_CLOEXEC);
 472		if (fd >= 0)
 473			return fd;
 474
 475		pr_debug("dso open failed: %s\n",
 476			 str_error_r(errno, sbuf, sizeof(sbuf)));
 477		if (!dso__data_open_cnt || errno != EMFILE)
 478			break;
 479
 480		close_first_dso();
 481	} while (1);
 482
 483	return -1;
 484}
 485
 486static int __open_dso(struct dso *dso, struct machine *machine)
 487{
 488	int fd = -EINVAL;
 489	char *root_dir = (char *)"";
 490	char *name = malloc(PATH_MAX);
 491	bool decomp = false;
 492
 493	if (!name)
 494		return -ENOMEM;
 495
 
 496	if (machine)
 497		root_dir = machine->root_dir;
 498
 499	if (dso__read_binary_type_filename(dso, dso->binary_type,
 500					    root_dir, name, PATH_MAX))
 501		goto out;
 502
 503	if (!is_regular_file(name))
 504		goto out;
 
 
 
 
 
 
 
 
 
 
 
 505
 506	if (dso__needs_decompress(dso)) {
 507		char newpath[KMOD_DECOMP_LEN];
 508		size_t len = sizeof(newpath);
 509
 510		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
 511			fd = -dso->load_errno;
 512			goto out;
 513		}
 514
 515		decomp = true;
 516		strcpy(name, newpath);
 517	}
 518
 519	fd = do_open(name);
 520
 521	if (decomp)
 522		unlink(name);
 523
 524out:
 
 525	free(name);
 526	return fd;
 527}
 528
 529static void check_data_close(void);
 530
 531/**
 532 * dso_close - Open DSO data file
 533 * @dso: dso object
 534 *
 535 * Open @dso's data file descriptor and updates
 536 * list/count of open DSO objects.
 537 */
 538static int open_dso(struct dso *dso, struct machine *machine)
 539{
 540	int fd;
 541	struct nscookie nsc;
 542
 543	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
 
 544		nsinfo__mountns_enter(dso->nsinfo, &nsc);
 
 
 545	fd = __open_dso(dso, machine);
 546	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
 547		nsinfo__mountns_exit(&nsc);
 548
 549	if (fd >= 0) {
 550		dso__list_add(dso);
 551		/*
 552		 * Check if we crossed the allowed number
 553		 * of opened DSOs and close one if needed.
 554		 */
 555		check_data_close();
 556	}
 557
 558	return fd;
 559}
 560
 561static void close_data_fd(struct dso *dso)
 562{
 563	if (dso->data.fd >= 0) {
 564		close(dso->data.fd);
 565		dso->data.fd = -1;
 566		dso->data.file_size = 0;
 567		dso__list_del(dso);
 568	}
 569}
 570
 571/**
 572 * dso_close - Close DSO data file
 573 * @dso: dso object
 574 *
 575 * Close @dso's data file descriptor and update the
 576 * list/count of open DSO objects.
 577 */
 578static void close_dso(struct dso *dso)
 579{
 580	close_data_fd(dso);
 581}
 582
 583static void close_first_dso(void)
 584{
 585	struct dso *dso;
 586
 587	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
 588	close_dso(dso);
 589}
 590
 591static rlim_t get_fd_limit(void)
 592{
 593	struct rlimit l;
 594	rlim_t limit = 0;
 595
 596	/* Allow half of the current open fd limit. */
 597	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
 598		if (l.rlim_cur == RLIM_INFINITY)
 599			limit = l.rlim_cur;
 600		else
 601			limit = l.rlim_cur / 2;
 602	} else {
 603		pr_err("failed to get fd limit\n");
 604		limit = 1;
 605	}
 606
 607	return limit;
 608}
 609
 610static rlim_t fd_limit;
 611
 612/*
 613 * Used only by tests/dso-data.c to reset the environment
 614 * for tests. I don't expect this to change during
 615 * standard runtime.
 616 */
 617void reset_fd_limit(void)
 618{
 619	fd_limit = 0;
 620}
 621
 622static bool may_cache_fd(void)
 623{
 624	if (!fd_limit)
 625		fd_limit = get_fd_limit();
 626
 627	if (fd_limit == RLIM_INFINITY)
 628		return true;
 629
 630	return fd_limit > (rlim_t) dso__data_open_cnt;
 631}
 632
 633/*
 634 * Check and close the LRU dso if we crossed the allowed limit
 635 * of open dso file descriptors. The limit is half of the
 636 * RLIMIT_NOFILE soft limit.
 637 */
 638static void check_data_close(void)
 639{
 640	bool cache_fd = may_cache_fd();
 641
 642	if (!cache_fd)
 643		close_first_dso();
 644}
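/*
 * For example, with a RLIMIT_NOFILE soft limit of 1024 (an illustrative
 * value), get_fd_limit() returns 512; once 512 dso data descriptors are
 * cached, may_cache_fd() returns false and the dso at the head of
 * dso__data_open is closed.
 */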
 645
 646/**
 647 * dso__data_close - Close DSO data file
 648 * @dso: dso object
 649 *
 650 * External interface to close @dso's data file descriptor.
 651 */
 652void dso__data_close(struct dso *dso)
 653{
 654	pthread_mutex_lock(&dso__data_open_lock);
 655	close_dso(dso);
 656	pthread_mutex_unlock(&dso__data_open_lock);
 657}
 658
 659static void try_to_open_dso(struct dso *dso, struct machine *machine)
 660{
 661	enum dso_binary_type binary_type_data[] = {
 662		DSO_BINARY_TYPE__BUILD_ID_CACHE,
 663		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
 664		DSO_BINARY_TYPE__NOT_FOUND,
 665	};
 666	int i = 0;
 667
 668	if (dso->data.fd >= 0)
 669		return;
 670
 671	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
 672		dso->data.fd = open_dso(dso, machine);
 673		goto out;
 674	}
 675
 676	do {
 677		dso->binary_type = binary_type_data[i++];
 678
 679		dso->data.fd = open_dso(dso, machine);
 680		if (dso->data.fd >= 0)
 681			goto out;
 682
 683	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
 684out:
 685	if (dso->data.fd >= 0)
 686		dso->data.status = DSO_DATA_STATUS_OK;
 687	else
 688		dso->data.status = DSO_DATA_STATUS_ERROR;
 689}
 690
 691/**
 692 * dso__data_get_fd - Get dso's data file descriptor
 693 * @dso: dso object
 694 * @machine: machine object
 695 *
 696 * External interface to find dso's file, open it and
 697 * return its file descriptor.  It should be paired with
 698 * dso__data_put_fd() if it returns a non-negative value.
 699 */
 700int dso__data_get_fd(struct dso *dso, struct machine *machine)
 701{
 702	if (dso->data.status == DSO_DATA_STATUS_ERROR)
 703		return -1;
 704
 705	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
 706		return -1;
 707
 708	try_to_open_dso(dso, machine);
 709
 710	if (dso->data.fd < 0)
 711		pthread_mutex_unlock(&dso__data_open_lock);
 712
 713	return dso->data.fd;
 714}
 715
 716void dso__data_put_fd(struct dso *dso __maybe_unused)
 717{
 718	pthread_mutex_unlock(&dso__data_open_lock);
 719}
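/*
 * Usage sketch (illustrative only; "fd" is a local of the caller): users
 * such as dso__type() below bracket any use of the descriptor with the
 * get/put pair, since the put releases dso__data_open_lock:
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... read from fd, e.g. with pread() ...
 *		dso__data_put_fd(dso);
 *	}
 */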
 720
 721bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 722{
 723	u32 flag = 1 << by;
 724
 725	if (dso->data.status_seen & flag)
 726		return true;
 727
 728	dso->data.status_seen |= flag;
 729
 730	return false;
 731}
 732
 733static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
 734{
 735	struct bpf_prog_info_node *node;
 736	ssize_t size = DSO__DATA_CACHE_SIZE;
 737	u64 len;
 738	u8 *buf;
 739
 740	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 741	if (!node || !node->info_linear) {
 742		dso->data.status = DSO_DATA_STATUS_ERROR;
 743		return -1;
 744	}
 745
 746	len = node->info_linear->info.jited_prog_len;
 747	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
 748
 749	if (offset >= len)
 750		return -1;
 751
 752	size = (ssize_t)min(len - offset, (u64)size);
 753	memcpy(data, buf + offset, size);
 754	return size;
 755}
 756
 757static int bpf_size(struct dso *dso)
 758{
 759	struct bpf_prog_info_node *node;
 760
 761	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 762	if (!node || !node->info_linear) {
 763		dso->data.status = DSO_DATA_STATUS_ERROR;
 764		return -1;
 765	}
 766
 767	dso->data.file_size = node->info_linear->info.jited_prog_len;
 768	return 0;
 769}
 770
 771static void
 772dso_cache__free(struct dso *dso)
 773{
 774	struct rb_root *root = &dso->data.cache;
 775	struct rb_node *next = rb_first(root);
 776
 777	pthread_mutex_lock(&dso->lock);
 778	while (next) {
 779		struct dso_cache *cache;
 780
 781		cache = rb_entry(next, struct dso_cache, rb_node);
 782		next = rb_next(&cache->rb_node);
 783		rb_erase(&cache->rb_node, root);
 784		free(cache);
 785	}
 786	pthread_mutex_unlock(&dso->lock);
 787}
 788
 789static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 790{
 791	const struct rb_root *root = &dso->data.cache;
 792	struct rb_node * const *p = &root->rb_node;
 793	const struct rb_node *parent = NULL;
 794	struct dso_cache *cache;
 795
 796	while (*p != NULL) {
 797		u64 end;
 798
 799		parent = *p;
 800		cache = rb_entry(parent, struct dso_cache, rb_node);
 801		end = cache->offset + DSO__DATA_CACHE_SIZE;
 802
 803		if (offset < cache->offset)
 804			p = &(*p)->rb_left;
 805		else if (offset >= end)
 806			p = &(*p)->rb_right;
 807		else
 808			return cache;
 809	}
 810
 811	return NULL;
 812}
 813
 814static struct dso_cache *
 815dso_cache__insert(struct dso *dso, struct dso_cache *new)
 816{
 817	struct rb_root *root = &dso->data.cache;
 818	struct rb_node **p = &root->rb_node;
 819	struct rb_node *parent = NULL;
 820	struct dso_cache *cache;
 821	u64 offset = new->offset;
 822
 823	pthread_mutex_lock(&dso->lock);
 824	while (*p != NULL) {
 825		u64 end;
 826
 827		parent = *p;
 828		cache = rb_entry(parent, struct dso_cache, rb_node);
 829		end = cache->offset + DSO__DATA_CACHE_SIZE;
 830
 831		if (offset < cache->offset)
 832			p = &(*p)->rb_left;
 833		else if (offset >= end)
 834			p = &(*p)->rb_right;
 835		else
 836			goto out;
 837	}
 838
 839	rb_link_node(&new->rb_node, parent, p);
 840	rb_insert_color(&new->rb_node, root);
 841
 842	cache = NULL;
 843out:
 844	pthread_mutex_unlock(&dso->lock);
 845	return cache;
 846}
 847
 848static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
 849				 u64 size, bool out)
 850{
 851	u64 cache_offset = offset - cache->offset;
 852	u64 cache_size   = min(cache->size - cache_offset, size);
 853
 854	if (out)
 855		memcpy(data, cache->data + cache_offset, cache_size);
 856	else
 857		memcpy(cache->data + cache_offset, data, cache_size);
 858	return cache_size;
 859}
 860
 861static ssize_t file_read(struct dso *dso, struct machine *machine,
 862			 u64 offset, char *data)
 863{
 864	ssize_t ret;
 865
 866	pthread_mutex_lock(&dso__data_open_lock);
 867
 868	/*
 869	 * dso->data.fd might be closed if another thread opened another
 870	 * file (dso) due to open file limit (RLIMIT_NOFILE).
 871	 */
 872	try_to_open_dso(dso, machine);
 873
 874	if (dso->data.fd < 0) {
 875		dso->data.status = DSO_DATA_STATUS_ERROR;
 876		ret = -errno;
 877		goto out;
 878	}
 879
 880	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
 881out:
 882	pthread_mutex_unlock(&dso__data_open_lock);
 883	return ret;
 884}
 885
 886static struct dso_cache *dso_cache__populate(struct dso *dso,
 887					     struct machine *machine,
 888					     u64 offset, ssize_t *ret)
 889{
 890	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 891	struct dso_cache *cache;
 892	struct dso_cache *old;
 893
 894	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
 895	if (!cache) {
 896		*ret = -ENOMEM;
 897		return NULL;
 898	}
 899
 900	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
 901		*ret = bpf_read(dso, cache_offset, cache->data);
 902	else if (dso->binary_type == DSO_BINARY_TYPE__OOL)
 903		*ret = DSO__DATA_CACHE_SIZE;
 904	else
 905		*ret = file_read(dso, machine, cache_offset, cache->data);
 906
 907	if (*ret <= 0) {
 908		free(cache);
 909		return NULL;
 910	}
 911
 912	cache->offset = cache_offset;
 913	cache->size   = *ret;
 914
 915	old = dso_cache__insert(dso, cache);
 916	if (old) {
 917		/* we lose the race */
 918		free(cache);
 919		cache = old;
 920	}
 921
 922	return cache;
 923}
 924
 925static struct dso_cache *dso_cache__find(struct dso *dso,
 926					 struct machine *machine,
 927					 u64 offset,
 928					 ssize_t *ret)
 929{
 930	struct dso_cache *cache = __dso_cache__find(dso, offset);
 931
 932	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
 933}
 934
 935static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
 936			    u64 offset, u8 *data, ssize_t size, bool out)
 937{
 938	struct dso_cache *cache;
 939	ssize_t ret = 0;
 940
 941	cache = dso_cache__find(dso, machine, offset, &ret);
 942	if (!cache)
 943		return ret;
 944
 945	return dso_cache__memcpy(cache, offset, data, size, out);
 946}
 947
 948/*
 949 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 950 * in the rb_tree. Any read of already cached data is served
 951 * from the cache. Writes update the cache only, not the backing file.
 952 */
 953static ssize_t cached_io(struct dso *dso, struct machine *machine,
 954			 u64 offset, u8 *data, ssize_t size, bool out)
 955{
 956	ssize_t r = 0;
 957	u8 *p = data;
 958
 959	do {
 960		ssize_t ret;
 961
 962		ret = dso_cache_io(dso, machine, offset, p, size, out);
 963		if (ret < 0)
 964			return ret;
 965
 966		/* Reached EOF, return what we have. */
 967		if (!ret)
 968			break;
 969
 970		BUG_ON(ret > size);
 971
 972		r      += ret;
 973		p      += ret;
 974		offset += ret;
 975		size   -= ret;
 976
 977	} while (size);
 978
 979	return r;
 980}
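/*
 * Example (illustrative, assuming DSO__DATA_CACHE_SIZE is 4096): a 100 byte
 * read at offset 4090 takes two dso_cache_io() calls; the first returns the
 * 6 bytes left in the chunk starting at offset 0, the second populates the
 * chunk at offset 4096 and copies the remaining 94 bytes.
 */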
 981
 982static int file_size(struct dso *dso, struct machine *machine)
 983{
 984	int ret = 0;
 985	struct stat st;
 986	char sbuf[STRERR_BUFSIZE];
 987
 988	pthread_mutex_lock(&dso__data_open_lock);
 989
 990	/*
 991	 * dso->data.fd might be closed if another thread opened another
 992	 * file (dso) due to open file limit (RLIMIT_NOFILE).
 993	 */
 994	try_to_open_dso(dso, machine);
 995
 996	if (dso->data.fd < 0) {
 997		ret = -errno;
 998		dso->data.status = DSO_DATA_STATUS_ERROR;
 999		goto out;
1000	}
1001
1002	if (fstat(dso->data.fd, &st) < 0) {
1003		ret = -errno;
1004		pr_err("dso cache fstat failed: %s\n",
1005		       str_error_r(errno, sbuf, sizeof(sbuf)));
1006		dso->data.status = DSO_DATA_STATUS_ERROR;
1007		goto out;
1008	}
1009	dso->data.file_size = st.st_size;
1010
1011out:
1012	pthread_mutex_unlock(&dso__data_open_lock);
1013	return ret;
1014}
1015
1016int dso__data_file_size(struct dso *dso, struct machine *machine)
1017{
1018	if (dso->data.file_size)
1019		return 0;
1020
1021	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1022		return -1;
1023
1024	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
1025		return bpf_size(dso);
1026
1027	return file_size(dso, machine);
1028}
1029
1030/**
1031 * dso__data_size - Return dso data size
1032 * @dso: dso object
1033 * @machine: machine object
1034 *
1035 * Return: dso data size
1036 */
1037off_t dso__data_size(struct dso *dso, struct machine *machine)
1038{
1039	if (dso__data_file_size(dso, machine))
1040		return -1;
1041
1042	/* For now just assume the dso data size is close to the file size */
1043	return dso->data.file_size;
1044}
1045
1046static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1047				      u64 offset, u8 *data, ssize_t size,
1048				      bool out)
1049{
1050	if (dso__data_file_size(dso, machine))
1051		return -1;
1052
1053	/* Check the offset sanity. */
1054	if (offset > dso->data.file_size)
1055		return -1;
1056
1057	if (offset + size < offset)
1058		return -1;
1059
1060	return cached_io(dso, machine, offset, data, size, out);
1061}
1062
1063/**
1064 * dso__data_read_offset - Read data from dso file offset
1065 * @dso: dso object
1066 * @machine: machine object
1067 * @offset: file offset
1068 * @data: buffer to store data
1069 * @size: size of the @data buffer
1070 *
1071 * External interface to read data from dso file offset. Open
1072 * dso data file and use cached_io to get the data.
1073 */
1074ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1075			      u64 offset, u8 *data, ssize_t size)
1076{
1077	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1078		return -1;
1079
1080	return data_read_write_offset(dso, machine, offset, data, size, true);
1081}
1082
1083/**
1084 * dso__data_read_addr - Read data from dso address
1085 * @dso: dso object
1086 * @machine: machine object
1087 * @addr: virtual memory address
1088 * @data: buffer to store data
1089 * @size: size of the @data buffer
1090 *
1091 * External interface to read data from dso address.
1092 */
1093ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1094			    struct machine *machine, u64 addr,
1095			    u8 *data, ssize_t size)
1096{
1097	u64 offset = map->map_ip(map, addr);
1098	return dso__data_read_offset(dso, machine, offset, data, size);
1099}
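/*
 * Usage sketch (illustrative; "buf" and the sampled address are made up):
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(dso, map, machine, sample_addr,
 *					buf, sizeof(buf));
 *
 *	if (n < (ssize_t)sizeof(buf))
 *		... short read or error ...
 */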
1100
1101/**
1102 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1103 * @dso: dso object
1104 * @machine: machine object
1105 * @offset: file offset
1106 * @data_in: buffer to write
1107 * @size: size of the @data buffer
1108 *
1109 * Write into the dso file data cache, but do not change the file itself.
1110 */
1111ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1112				   u64 offset, const u8 *data_in, ssize_t size)
1113{
1114	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1115
1116	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1117		return -1;
1118
1119	return data_read_write_offset(dso, machine, offset, data, size, false);
1120}
1121
1122/**
1123 * dso__data_write_cache_addr - Write data to dso data cache at dso address
1124 * @dso: dso object
1125 * @machine: machine object
1126 * @addr: virtual memory address
1127 * @data: buffer to write
1128 * @size: size of the @data buffer
1129 *
1130 * External interface to write into the dso file data cache, but do not change
1131 * the file itself.
1132 */
1133ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1134				   struct machine *machine, u64 addr,
1135				   const u8 *data, ssize_t size)
1136{
1137	u64 offset = map->map_ip(map, addr);
1138	return dso__data_write_cache_offs(dso, machine, offset, data, size);
1139}
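/*
 * Usage sketch (illustrative): a tool that already holds decoded bytes for
 * an address can seed the cache so that later dso__data_read_addr() calls
 * for the same range are served from it instead of the backing file:
 *
 *	dso__data_write_cache_addr(dso, map, machine, addr, bytes, len);
 *	...
 *	dso__data_read_addr(dso, map, machine, addr, buf, len);
 */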
1140
1141struct map *dso__new_map(const char *name)
1142{
1143	struct map *map = NULL;
1144	struct dso *dso = dso__new(name);
1145
1146	if (dso)
1147		map = map__new2(0, dso);
1148
1149	return map;
1150}
1151
1152struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1153				    const char *short_name, int dso_type)
1154{
1155	/*
1156	 * The kernel dso could be created by build_id processing.
1157	 */
1158	struct dso *dso = machine__findnew_dso(machine, name);
1159
1160	/*
1161	 * We need to run this in all cases, since during the build_id
1162	 * processing we had no idea this was the kernel dso.
1163	 */
1164	if (dso != NULL) {
1165		dso__set_short_name(dso, short_name, false);
1166		dso->kernel = dso_type;
1167	}
1168
1169	return dso;
1170}
1171
1172static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
1173{
1174	struct rb_root *root = dso->root;
1175
1176	if (name == NULL)
1177		return;
1178
1179	if (dso->long_name_allocated)
1180		free((char *)dso->long_name);
1181
1182	if (root) {
1183		rb_erase(&dso->rb_node, root);
1184		/*
1185		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
1186		 * add it back, so a clean removal is required here.
1187		 */
1188		RB_CLEAR_NODE(&dso->rb_node);
1189		dso->root = NULL;
1190	}
1191
1192	dso->long_name		 = name;
1193	dso->long_name_len	 = strlen(name);
1194	dso->long_name_allocated = name_allocated;
1195
1196	if (root)
1197		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
1198}
1199
1200void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1201{
1202	dso__set_long_name_id(dso, name, NULL, name_allocated);
1203}
1204
1205void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1206{
1207	if (name == NULL)
1208		return;
1209
1210	if (dso->short_name_allocated)
1211		free((char *)dso->short_name);
1212
1213	dso->short_name		  = name;
1214	dso->short_name_len	  = strlen(name);
1215	dso->short_name_allocated = name_allocated;
1216}
1217
1218int dso__name_len(const struct dso *dso)
1219{
1220	if (!dso)
1221		return strlen("[unknown]");
1222	if (verbose > 0)
1223		return dso->long_name_len;
1224
1225	return dso->short_name_len;
1226}
1227
1228bool dso__loaded(const struct dso *dso)
1229{
1230	return dso->loaded;
1231}
1232
1233bool dso__sorted_by_name(const struct dso *dso)
1234{
1235	return dso->sorted_by_name;
1236}
1237
1238void dso__set_sorted_by_name(struct dso *dso)
1239{
1240	dso->sorted_by_name = true;
1241}
1242
1243struct dso *dso__new_id(const char *name, struct dso_id *id)
1244{
1245	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1246
1247	if (dso != NULL) {
1248		strcpy(dso->name, name);
1249		if (id)
1250			dso->id = *id;
1251		dso__set_long_name_id(dso, dso->name, id, false);
1252		dso__set_short_name(dso, dso->name, false);
1253		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1254		dso->data.cache = RB_ROOT;
1255		dso->inlined_nodes = RB_ROOT_CACHED;
1256		dso->srclines = RB_ROOT_CACHED;
1257		dso->data.fd = -1;
1258		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1259		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1260		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1261		dso->is_64_bit = (sizeof(void *) == 8);
1262		dso->loaded = 0;
1263		dso->rel = 0;
1264		dso->sorted_by_name = 0;
1265		dso->has_build_id = 0;
1266		dso->has_srcline = 1;
1267		dso->a2l_fails = 1;
1268		dso->kernel = DSO_SPACE__USER;
1269		dso->needs_swap = DSO_SWAP__UNSET;
1270		dso->comp = COMP_ID__NONE;
1271		RB_CLEAR_NODE(&dso->rb_node);
1272		dso->root = NULL;
1273		INIT_LIST_HEAD(&dso->node);
1274		INIT_LIST_HEAD(&dso->data.open_entry);
1275		pthread_mutex_init(&dso->lock, NULL);
1276		refcount_set(&dso->refcnt, 1);
1277	}
1278
1279	return dso;
1280}
1281
1282struct dso *dso__new(const char *name)
1283{
1284	return dso__new_id(name, NULL);
1285}
1286
1287void dso__delete(struct dso *dso)
1288{
1289	if (!RB_EMPTY_NODE(&dso->rb_node))
1290		pr_err("DSO %s is still in rbtree when being deleted!\n",
1291		       dso->long_name);
1292
1293	/* free inlines first, as they reference symbols */
1294	inlines__tree_delete(&dso->inlined_nodes);
1295	srcline__tree_delete(&dso->srclines);
1296	symbols__delete(&dso->symbols);
1297
1298	if (dso->short_name_allocated) {
1299		zfree((char **)&dso->short_name);
1300		dso->short_name_allocated = false;
1301	}
1302
1303	if (dso->long_name_allocated) {
1304		zfree((char **)&dso->long_name);
1305		dso->long_name_allocated = false;
1306	}
1307
1308	dso__data_close(dso);
1309	auxtrace_cache__free(dso->auxtrace_cache);
1310	dso_cache__free(dso);
1311	dso__free_a2l(dso);
1312	zfree(&dso->symsrc_filename);
1313	nsinfo__zput(dso->nsinfo);
1314	pthread_mutex_destroy(&dso->lock);
1315	free(dso);
1316}
1317
1318struct dso *dso__get(struct dso *dso)
1319{
1320	if (dso)
1321		refcount_inc(&dso->refcnt);
1322	return dso;
1323}
1324
1325void dso__put(struct dso *dso)
1326{
1327	if (dso && refcount_dec_and_test(&dso->refcnt))
1328		dso__delete(dso);
1329}
1330
1331void dso__set_build_id(struct dso *dso, void *build_id)
1332{
1333	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1334	dso->has_build_id = 1;
1335}
1336
1337bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1338{
1339	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1340}
1341
1342void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1343{
1344	char path[PATH_MAX];
1345
1346	if (machine__is_default_guest(machine))
1347		return;
1348	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1349	if (sysfs__read_build_id(path, dso->build_id,
1350				 sizeof(dso->build_id)) == 0)
1351		dso->has_build_id = true;
1352}
1353
1354int dso__kernel_module_get_build_id(struct dso *dso,
1355				    const char *root_dir)
1356{
1357	char filename[PATH_MAX];
1358	/*
1359	 * kernel module short names are of the form "[module]" and
1360	 * we need just "module" here.
1361	 */
1362	const char *name = dso->short_name + 1;
1363
1364	snprintf(filename, sizeof(filename),
1365		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1366		 root_dir, (int)strlen(name) - 1, name);
1367
1368	if (sysfs__read_build_id(filename, dso->build_id,
1369				 sizeof(dso->build_id)) == 0)
1370		dso->has_build_id = true;
1371
1372	return 0;
1373}
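/*
 * Example (illustrative): for a module dso with short_name "[ext4]" and an
 * empty root_dir, the build-id is read from
 * "/sys/module/ext4/notes/.note.gnu.build-id".
 */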
1374
1375size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1376{
1377	char sbuild_id[SBUILD_ID_SIZE];
1378
1379	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1380	return fprintf(fp, "%s", sbuild_id);
1381}
1382
1383size_t dso__fprintf(struct dso *dso, FILE *fp)
1384{
1385	struct rb_node *nd;
1386	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1387
1388	if (dso->short_name != dso->long_name)
1389		ret += fprintf(fp, "%s, ", dso->long_name);
1390	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1391	ret += dso__fprintf_buildid(dso, fp);
1392	ret += fprintf(fp, ")\n");
1393	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1394		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1395		ret += symbol__fprintf(pos, fp);
1396	}
1397
1398	return ret;
1399}
1400
1401enum dso_type dso__type(struct dso *dso, struct machine *machine)
1402{
1403	int fd;
1404	enum dso_type type = DSO__TYPE_UNKNOWN;
1405
1406	fd = dso__data_get_fd(dso, machine);
1407	if (fd >= 0) {
1408		type = dso__type_fd(fd);
1409		dso__data_put_fd(dso);
1410	}
1411
1412	return type;
1413}
1414
1415int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1416{
1417	int idx, errnum = dso->load_errno;
1418	/*
1419	 * This must have the same ordering as enum dso_load_errno.
1420	 */
1421	static const char *dso_load__error_str[] = {
1422	"Internal tools/perf/ library error",
1423	"Invalid ELF file",
1424	"Can not read build id",
1425	"Mismatching build id",
1426	"Decompression failure",
1427	};
1428
1429	BUG_ON(buflen == 0);
1430
1431	if (errnum >= 0) {
1432		const char *err = str_error_r(errnum, buf, buflen);
1433
1434		if (err != buf)
1435			scnprintf(buf, buflen, "%s", err);
1436
1437		return 0;
1438	}
1439
1440	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1441		return -1;
1442
1443	idx = errnum - __DSO_LOAD_ERRNO__START;
1444	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1445	return 0;
1446}
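/*
 * Usage sketch (illustrative; "buf" is a caller-provided buffer): after a
 * failed load, dso->load_errno can be turned into a message:
 *
 *	char buf[BUFSIZ];
 *
 *	if (dso__strerror_load(dso, buf, sizeof(buf)) == 0)
 *		pr_err("failed to load %s: %s\n", dso->long_name, buf);
 */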