v6.2 (tools/perf/util/dso.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <asm/bug.h>
   3#include <linux/kernel.h>
   4#include <linux/string.h>
   5#include <linux/zalloc.h>
   6#include <sys/time.h>
   7#include <sys/resource.h>
   8#include <sys/types.h>
   9#include <sys/stat.h>
  10#include <unistd.h>
  11#include <errno.h>
  12#include <fcntl.h>
  13#include <stdlib.h>
  14#ifdef HAVE_LIBBPF_SUPPORT
  15#include <bpf/libbpf.h>
  16#include "bpf-event.h"
  17#include "bpf-utils.h"
  18#endif
  19#include "compress.h"
  20#include "env.h"
  21#include "namespaces.h"
  22#include "path.h"
  23#include "map.h"
  24#include "symbol.h"
  25#include "srcline.h"
  26#include "dso.h"
  27#include "dsos.h"
  28#include "machine.h"
  29#include "auxtrace.h"
  30#include "util.h" /* O_CLOEXEC for older systems */
  31#include "debug.h"
  32#include "string2.h"
  33#include "vdso.h"
  34
  35static const char * const debuglink_paths[] = {
  36	"%.0s%s",
  37	"%s/%s",
  38	"%s/.debug/%s",
  39	"/usr/lib/debug%s/%s"
  40};
  41
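A worked example of how these format strings get used: the DSO_BINARY_TYPE__DEBUGLINK case of dso__read_binary_type_filename() below fills each entry with snprintf(filename, size, debuglink_paths[i], dso_dir, symfile). With a hypothetical dso_dir of "/usr/lib" and a debuglink of "libfoo.so.debug", the candidates tried in order are:

	"libfoo.so.debug"                          /* "%.0s%s": the debuglink as-is  */
	"/usr/lib/libfoo.so.debug"                 /* same directory as the binary   */
	"/usr/lib/.debug/libfoo.so.debug"          /* .debug subdirectory            */
	"/usr/lib/debug/usr/lib/libfoo.so.debug"   /* global debug directory         */

The first candidate that is a regular file wins.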
  42char dso__symtab_origin(const struct dso *dso)
  43{
  44	static const char origin[] = {
  45		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
  46		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
  47		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
  48		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
  49		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
  50		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
  51		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
  52		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
  53		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
  54		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
  55		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
  56		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
  57		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
  58		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
  59		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
  60		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
  61		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
  62		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
  63	};
  64
  65	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
  66		return '!';
  67	return origin[dso->symtab_type];
  68}
  69
  70int dso__read_binary_type_filename(const struct dso *dso,
  71				   enum dso_binary_type type,
  72				   char *root_dir, char *filename, size_t size)
  73{
  74	char build_id_hex[SBUILD_ID_SIZE];
  75	int ret = 0;
  76	size_t len;
  77
  78	switch (type) {
  79	case DSO_BINARY_TYPE__DEBUGLINK:
  80	{
  81		const char *last_slash;
  82		char dso_dir[PATH_MAX];
  83		char symfile[PATH_MAX];
  84		unsigned int i;
  85
  86		len = __symbol__join_symfs(filename, size, dso->long_name);
  87		last_slash = filename + len;
  88		while (last_slash != filename && *last_slash != '/')
  89			last_slash--;
  90
  91		strncpy(dso_dir, filename, last_slash - filename);
  92		dso_dir[last_slash-filename] = '\0';
  93
  94		if (!is_regular_file(filename)) {
  95			ret = -1;
  96			break;
  97		}
  98
  99		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
 100		if (ret)
 101			break;
 102
 103		/* Check predefined locations where debug file might reside */
 104		ret = -1;
 105		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
 106			snprintf(filename, size,
 107					debuglink_paths[i], dso_dir, symfile);
 108			if (is_regular_file(filename)) {
 109				ret = 0;
 110				break;
 111			}
 112		}
 113
 114		break;
 115	}
 116	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
 117		if (dso__build_id_filename(dso, filename, size, false) == NULL)
 118			ret = -1;
 119		break;
 120
 121	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
 122		if (dso__build_id_filename(dso, filename, size, true) == NULL)
 123			ret = -1;
 124		break;
 125
 126	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
 127		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 128		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
 129		break;
 130
 131	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
 132		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 133		snprintf(filename + len, size - len, "%s", dso->long_name);
 134		break;
 135
 136	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
 137		/*
 138		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
 139		 * /usr/lib/debug/lib when it is expected to be in
 140		 * /usr/lib/debug/usr/lib
 141		 */
 142		if (strlen(dso->long_name) < 9 ||
 143		    strncmp(dso->long_name, "/usr/lib/", 9)) {
 144			ret = -1;
 145			break;
 146		}
 147		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 148		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
 149		break;
 150
 151	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
 152	{
 153		const char *last_slash;
 154		size_t dir_size;
 155
 156		last_slash = dso->long_name + dso->long_name_len;
 157		while (last_slash != dso->long_name && *last_slash != '/')
 158			last_slash--;
 159
 160		len = __symbol__join_symfs(filename, size, "");
 161		dir_size = last_slash - dso->long_name + 2;
 162		if (dir_size > (size - len)) {
 163			ret = -1;
 164			break;
 165		}
 166		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
 167		len += scnprintf(filename + len , size - len, ".debug%s",
 168								last_slash);
 169		break;
 170	}
 171
 172	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
 173		if (!dso->has_build_id) {
 174			ret = -1;
 175			break;
 176		}
 177
 178		build_id__sprintf(&dso->bid, build_id_hex);
 179		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
 180		snprintf(filename + len, size - len, "%.2s/%s.debug",
 181			 build_id_hex, build_id_hex + 2);
 182		break;
 183
 184	case DSO_BINARY_TYPE__VMLINUX:
 185	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 186	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
 187		__symbol__join_symfs(filename, size, dso->long_name);
 188		break;
 189
 190	case DSO_BINARY_TYPE__GUEST_KMODULE:
 191	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
 192		path__join3(filename, size, symbol_conf.symfs,
 193			    root_dir, dso->long_name);
 194		break;
 195
 196	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
 197	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 198		__symbol__join_symfs(filename, size, dso->long_name);
 199		break;
 200
 201	case DSO_BINARY_TYPE__KCORE:
 202	case DSO_BINARY_TYPE__GUEST_KCORE:
 203		snprintf(filename, size, "%s", dso->long_name);
 204		break;
 205
 206	default:
 207	case DSO_BINARY_TYPE__KALLSYMS:
 208	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
 209	case DSO_BINARY_TYPE__JAVA_JIT:
 210	case DSO_BINARY_TYPE__BPF_PROG_INFO:
 211	case DSO_BINARY_TYPE__BPF_IMAGE:
 212	case DSO_BINARY_TYPE__OOL:
 213	case DSO_BINARY_TYPE__NOT_FOUND:
 214		ret = -1;
 215		break;
 216	}
 217
 218	return ret;
 219}
 220
 221enum {
 222	COMP_ID__NONE = 0,
 223};
 224
 225static const struct {
 226	const char *fmt;
 227	int (*decompress)(const char *input, int output);
 228	bool (*is_compressed)(const char *input);
 229} compressions[] = {
 230	[COMP_ID__NONE] = { .fmt = NULL, },
 231#ifdef HAVE_ZLIB_SUPPORT
 232	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
 233#endif
 234#ifdef HAVE_LZMA_SUPPORT
 235	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
 236#endif
 237	{ NULL, NULL, NULL },
 238};
 239
 240static int is_supported_compression(const char *ext)
 241{
 242	unsigned i;
 243
 244	for (i = 1; compressions[i].fmt; i++) {
 245		if (!strcmp(ext, compressions[i].fmt))
 246			return i;
 247	}
 248	return COMP_ID__NONE;
 249}
 250
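Note that the nonzero id returned here is simply the index of the matching slot in compressions[], so its value depends on which of HAVE_ZLIB_SUPPORT and HAVE_LZMA_SUPPORT are built in; callers only compare it against COMP_ID__NONE and later use it to pick the decompress()/is_compressed() callbacks. A minimal sketch, assuming xz support is compiled in and fd is an already-open output descriptor (the path is hypothetical):

	int comp = is_supported_compression("xz");

	if (comp != COMP_ID__NONE)
		compressions[comp].decompress("/tmp/foo.ko.xz", fd);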
 251bool is_kernel_module(const char *pathname, int cpumode)
 252{
 253	struct kmod_path m;
 254	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
 255
 256	WARN_ONCE(mode != cpumode,
 257		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
 258		  cpumode);
 259
 260	switch (mode) {
 261	case PERF_RECORD_MISC_USER:
 262	case PERF_RECORD_MISC_HYPERVISOR:
 263	case PERF_RECORD_MISC_GUEST_USER:
 264		return false;
 265	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
 266	default:
 267		if (kmod_path__parse(&m, pathname)) {
 268			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
 269					pathname);
 270			return true;
 271		}
 272	}
 273
 274	return m.kmod;
 275}
 276
 277bool dso__needs_decompress(struct dso *dso)
 278{
 279	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
 280		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 281}
 282
 283int filename__decompress(const char *name, char *pathname,
 284			 size_t len, int comp, int *err)
 285{
 286	char tmpbuf[] = KMOD_DECOMP_NAME;
 287	int fd = -1;
 288
 289	/*
 290	 * We have proper compression id for DSO and yet the file
 291	 * behind the 'name' can still be plain uncompressed object.
 292	 *
 293	 * The reason lies in the logic we use to open DSO object files:
 294	 * we try all possible 'debug' objects until we find the
 295	 * data. So even if the DSO is represented by the 'krava.xz' module,
 296	 * we can end up here opening a '~/.debug/....23432432/debug' file
 297	 * which is not compressed.
 298	 *
 299	 * To keep this transparent, we detect this and return the file
 300	 * descriptor to the uncompressed file.
 301	 */
 302	if (!compressions[comp].is_compressed(name))
 303		return open(name, O_RDONLY);
 304
 305	fd = mkstemp(tmpbuf);
 306	if (fd < 0) {
 307		*err = errno;
 308		return -1;
 309	}
 310
 311	if (compressions[comp].decompress(name, fd)) {
 312		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
 313		close(fd);
 314		fd = -1;
 315	}
 316
 317	if (!pathname || (fd < 0))
 318		unlink(tmpbuf);
 319
 320	if (pathname && (fd >= 0))
 321		strlcpy(pathname, tmpbuf, len);
 322
 323	return fd;
 324}
 325
 326static int decompress_kmodule(struct dso *dso, const char *name,
 327			      char *pathname, size_t len)
 328{
 329	if (!dso__needs_decompress(dso))
 330		return -1;
 331
 332	if (dso->comp == COMP_ID__NONE)
 333		return -1;
 334
 335	return filename__decompress(name, pathname, len, dso->comp,
 336				    &dso->load_errno);
 337}
 338
 339int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
 340{
 341	return decompress_kmodule(dso, name, NULL, 0);
 342}
 343
 344int dso__decompress_kmodule_path(struct dso *dso, const char *name,
 345				 char *pathname, size_t len)
 346{
 347	int fd = decompress_kmodule(dso, name, pathname, len);
 348
 349	close(fd);
 350	return fd >= 0 ? 0 : -1;
 351}
 352
 353/*
 354 * Parses kernel module specified in @path and updates
 355 * @m argument like:
 356 *
 357 *    @comp - true if @path contains supported compression suffix,
 358 *            false otherwise
 359 *    @kmod - true if @path contains '.ko' suffix in right position,
 360 *            false otherwise
 361 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 362 *            of the kernel module without suffixes, otherwise strdup-ed
 363 *            base name of @path
 364 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 365 *            the compression suffix
 366 *
 367 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 368 */
 369int __kmod_path__parse(struct kmod_path *m, const char *path,
 370		       bool alloc_name)
 371{
 372	const char *name = strrchr(path, '/');
 373	const char *ext  = strrchr(path, '.');
 374	bool is_simple_name = false;
 375
 376	memset(m, 0x0, sizeof(*m));
 377	name = name ? name + 1 : path;
 378
 379	/*
 380	 * '.' is also a valid character for module name. For example:
 381	 * [aaa.bbb] is a valid module name. '[' should have higher
 382	 * priority than '.ko' suffix.
 383	 *
 384	 * The kernel names are from machine__mmap_name. Such
 385	 * name should belong to kernel itself, not kernel module.
 386	 */
 387	if (name[0] == '[') {
 388		is_simple_name = true;
 389		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
 390		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
 391		    (strncmp(name, "[vdso]", 6) == 0) ||
 392		    (strncmp(name, "[vdso32]", 8) == 0) ||
 393		    (strncmp(name, "[vdsox32]", 9) == 0) ||
 394		    (strncmp(name, "[vsyscall]", 10) == 0)) {
 395			m->kmod = false;
 396
 397		} else
 398			m->kmod = true;
 399	}
 400
 401	/* No extension, just return name. */
 402	if ((ext == NULL) || is_simple_name) {
 403		if (alloc_name) {
 404			m->name = strdup(name);
 405			return m->name ? 0 : -ENOMEM;
 406		}
 407		return 0;
 408	}
 409
 410	m->comp = is_supported_compression(ext + 1);
 411	if (m->comp > COMP_ID__NONE)
 412		ext -= 3;
 413
 414	/* Check .ko extension only if there's enough name left. */
 415	if (ext > name)
 416		m->kmod = !strncmp(ext, ".ko", 3);
 417
 418	if (alloc_name) {
 419		if (m->kmod) {
 420			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
 421				return -ENOMEM;
 422		} else {
 423			if (asprintf(&m->name, "%s", name) == -1)
 424				return -ENOMEM;
 425		}
 426
 427		strreplace(m->name, '-', '_');
 428	}
 429
 430	return 0;
 431}
 432
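A worked example with a hypothetical module path: for "/lib/modules/6.2.0/kernel/crypto/aes-ce.ko.xz" and alloc_name == true (xz support built in), ext first points at ".xz", so m->comp becomes nonzero and ext is moved back onto ".ko", which sets m->kmod; the allocated name is the bracketed basename with '-' replaced by '_':

	struct kmod_path m;

	if (__kmod_path__parse(&m, "/lib/modules/6.2.0/kernel/crypto/aes-ce.ko.xz", true))
		return -ENOMEM;
	/* m.kmod == true, m.comp != COMP_ID__NONE, m.name == "[aes_ce]" */
	free(m.name);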
 433void dso__set_module_info(struct dso *dso, struct kmod_path *m,
 434			  struct machine *machine)
 435{
 436	if (machine__is_host(machine))
 437		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
 438	else
 439		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
 440
 441	/* _KMODULE_COMP should be next to _KMODULE */
 442	if (m->kmod && m->comp) {
 443		dso->symtab_type++;
 444		dso->comp = m->comp;
 445	}
 446
 447	dso__set_short_name(dso, strdup(m->name), true);
 448}
 449
 450/*
 451 * Global list of open DSOs and the counter.
 452 */
 453static LIST_HEAD(dso__data_open);
 454static long dso__data_open_cnt;
 455static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
 456
 457static void dso__list_add(struct dso *dso)
 458{
 459	list_add_tail(&dso->data.open_entry, &dso__data_open);
 460	dso__data_open_cnt++;
 461}
 462
 463static void dso__list_del(struct dso *dso)
 464{
 465	list_del_init(&dso->data.open_entry);
 466	WARN_ONCE(dso__data_open_cnt <= 0,
 467		  "DSO data fd counter out of bounds.");
 468	dso__data_open_cnt--;
 469}
 470
 471static void close_first_dso(void);
 472
 473static int do_open(char *name)
 474{
 475	int fd;
 476	char sbuf[STRERR_BUFSIZE];
 477
 478	do {
 479		fd = open(name, O_RDONLY|O_CLOEXEC);
 480		if (fd >= 0)
 481			return fd;
 482
 483		pr_debug("dso open failed: %s\n",
 484			 str_error_r(errno, sbuf, sizeof(sbuf)));
 485		if (!dso__data_open_cnt || errno != EMFILE)
 486			break;
 487
 488		close_first_dso();
 489	} while (1);
 490
 491	return -1;
 492}
 493
 494static int __open_dso(struct dso *dso, struct machine *machine)
 495{
 496	int fd = -EINVAL;
 497	char *root_dir = (char *)"";
 498	char *name = malloc(PATH_MAX);
 499	bool decomp = false;
 500
 501	if (!name)
 502		return -ENOMEM;
 503
 504	mutex_lock(&dso->lock);
 505	if (machine)
 506		root_dir = machine->root_dir;
 507
 508	if (dso__read_binary_type_filename(dso, dso->binary_type,
 509					    root_dir, name, PATH_MAX))
 510		goto out;
 511
 512	if (!is_regular_file(name)) {
 513		char *new_name;
 514
 515		if (errno != ENOENT || dso->nsinfo == NULL)
 516			goto out;
 517
 518		new_name = filename_with_chroot(dso->nsinfo->pid, name);
 519		if (!new_name)
 520			goto out;
 521
 522		free(name);
 523		name = new_name;
 524	}
 525
 526	if (dso__needs_decompress(dso)) {
 527		char newpath[KMOD_DECOMP_LEN];
 528		size_t len = sizeof(newpath);
 529
 530		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
 531			fd = -dso->load_errno;
 532			goto out;
 533		}
 534
 535		decomp = true;
 536		strcpy(name, newpath);
 537	}
 538
 539	fd = do_open(name);
 540
 541	if (decomp)
 542		unlink(name);
 543
 544out:
 545	mutex_unlock(&dso->lock);
 546	free(name);
 547	return fd;
 548}
 549
 550static void check_data_close(void);
 551
 552/**
 553 * open_dso - Open DSO data file
 554 * @dso: dso object
 555 *
 556 * Open @dso's data file descriptor and updates
 557 * list/count of open DSO objects.
 558 */
 559static int open_dso(struct dso *dso, struct machine *machine)
 560{
 561	int fd;
 562	struct nscookie nsc;
 563
 564	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
 565		mutex_lock(&dso->lock);
 566		nsinfo__mountns_enter(dso->nsinfo, &nsc);
 567		mutex_unlock(&dso->lock);
 568	}
 569	fd = __open_dso(dso, machine);
 570	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
 571		nsinfo__mountns_exit(&nsc);
 572
 573	if (fd >= 0) {
 574		dso__list_add(dso);
 575		/*
 576		 * Check if we crossed the allowed number
 577		 * of opened DSOs and close one if needed.
 578		 */
 579		check_data_close();
 580	}
 581
 582	return fd;
 583}
 584
 585static void close_data_fd(struct dso *dso)
 586{
 587	if (dso->data.fd >= 0) {
 588		close(dso->data.fd);
 589		dso->data.fd = -1;
 590		dso->data.file_size = 0;
 591		dso__list_del(dso);
 592	}
 593}
 594
 595/**
 596 * close_dso - Close DSO data file
 597 * @dso: dso object
 598 *
 599 * Close @dso's data file descriptor and updates
 600 * list/count of open DSO objects.
 601 */
 602static void close_dso(struct dso *dso)
 603{
 604	close_data_fd(dso);
 605}
 606
 607static void close_first_dso(void)
 608{
 609	struct dso *dso;
 610
 611	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
 612	close_dso(dso);
 613}
 614
 615static rlim_t get_fd_limit(void)
 616{
 617	struct rlimit l;
 618	rlim_t limit = 0;
 619
 620	/* Allow half of the current open fd limit. */
 621	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
 622		if (l.rlim_cur == RLIM_INFINITY)
 623			limit = l.rlim_cur;
 624		else
 625			limit = l.rlim_cur / 2;
 626	} else {
 627		pr_err("failed to get fd limit\n");
 628		limit = 1;
 629	}
 630
 631	return limit;
 632}
 633
 634static rlim_t fd_limit;
 635
 636/*
 637 * Used only by tests/dso-data.c to reset the environment
 638 * for tests. I don't expect this to change during
 639 * standard runtime.
 640 */
 641void reset_fd_limit(void)
 642{
 643	fd_limit = 0;
 644}
 645
 646static bool may_cache_fd(void)
 647{
 648	if (!fd_limit)
 649		fd_limit = get_fd_limit();
 650
 651	if (fd_limit == RLIM_INFINITY)
 652		return true;
 653
 654	return fd_limit > (rlim_t) dso__data_open_cnt;
 655}
 656
 657/*
 658 * Check and close LRU dso if we crossed allowed limit
 659 * for opened dso file descriptors. The limit is half
 660 * of the RLIMIT_NOFILE files opened.
 661 */
 662static void check_data_close(void)
 663{
 664	bool cache_fd = may_cache_fd();
 665
 666	if (!cache_fd)
 667		close_first_dso();
 668}
 669
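To make the limit concrete (numbers purely illustrative): with a soft RLIMIT_NOFILE of 1024, get_fd_limit() returns 512, so may_cache_fd() stays true while fewer than 512 DSO file descriptors are cached; once the count reaches 512, check_data_close() closes the longest-open entry at the head of dso__data_open, keeping roughly half of the process fd budget free for the rest of the tool.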
 670/**
 671 * dso__data_close - Close DSO data file
 672 * @dso: dso object
 673 *
 674 * External interface to close @dso's data file descriptor.
 675 */
 676void dso__data_close(struct dso *dso)
 677{
 678	pthread_mutex_lock(&dso__data_open_lock);
 679	close_dso(dso);
 680	pthread_mutex_unlock(&dso__data_open_lock);
 681}
 682
 683static void try_to_open_dso(struct dso *dso, struct machine *machine)
 684{
 685	enum dso_binary_type binary_type_data[] = {
 686		DSO_BINARY_TYPE__BUILD_ID_CACHE,
 687		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
 688		DSO_BINARY_TYPE__NOT_FOUND,
 689	};
 690	int i = 0;
 691
 692	if (dso->data.fd >= 0)
 693		return;
 694
 695	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
 696		dso->data.fd = open_dso(dso, machine);
 697		goto out;
 698	}
 699
 700	do {
 701		dso->binary_type = binary_type_data[i++];
 702
 703		dso->data.fd = open_dso(dso, machine);
 704		if (dso->data.fd >= 0)
 705			goto out;
 706
 707	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
 708out:
 709	if (dso->data.fd >= 0)
 710		dso->data.status = DSO_DATA_STATUS_OK;
 711	else
 712		dso->data.status = DSO_DATA_STATUS_ERROR;
 713}
 714
 715/**
 716 * dso__data_get_fd - Get dso's data file descriptor
 717 * @dso: dso object
 718 * @machine: machine object
 719 *
 720 * External interface to find dso's file, open it and
 721 * return its file descriptor.  It should be paired with
 722 * dso__data_put_fd() if it returns a non-negative value.
 723 */
 724int dso__data_get_fd(struct dso *dso, struct machine *machine)
 725{
 726	if (dso->data.status == DSO_DATA_STATUS_ERROR)
 727		return -1;
 728
 729	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
 730		return -1;
 731
 732	try_to_open_dso(dso, machine);
 733
 734	if (dso->data.fd < 0)
 735		pthread_mutex_unlock(&dso__data_open_lock);
 736
 737	return dso->data.fd;
 738}
 739
 740void dso__data_put_fd(struct dso *dso __maybe_unused)
 741{
 742	pthread_mutex_unlock(&dso__data_open_lock);
 743}
 744
 745bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 746{
 747	u32 flag = 1 << by;
 748
 749	if (dso->data.status_seen & flag)
 750		return true;
 751
 752	dso->data.status_seen |= flag;
 753
 754	return false;
 755}
 756
 757#ifdef HAVE_LIBBPF_SUPPORT
 758static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
 759{
 760	struct bpf_prog_info_node *node;
 761	ssize_t size = DSO__DATA_CACHE_SIZE;
 762	u64 len;
 763	u8 *buf;
 764
 765	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 766	if (!node || !node->info_linear) {
 767		dso->data.status = DSO_DATA_STATUS_ERROR;
 768		return -1;
 769	}
 770
 771	len = node->info_linear->info.jited_prog_len;
 772	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
 773
 774	if (offset >= len)
 775		return -1;
 776
 777	size = (ssize_t)min(len - offset, (u64)size);
 778	memcpy(data, buf + offset, size);
 779	return size;
 780}
 781
 782static int bpf_size(struct dso *dso)
 783{
 784	struct bpf_prog_info_node *node;
 785
 786	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
 787	if (!node || !node->info_linear) {
 788		dso->data.status = DSO_DATA_STATUS_ERROR;
 789		return -1;
 790	}
 791
 792	dso->data.file_size = node->info_linear->info.jited_prog_len;
 793	return 0;
 794}
 795#endif // HAVE_LIBBPF_SUPPORT
 796
 797static void
 798dso_cache__free(struct dso *dso)
 799{
 800	struct rb_root *root = &dso->data.cache;
 801	struct rb_node *next = rb_first(root);
 802
 803	mutex_lock(&dso->lock);
 804	while (next) {
 805		struct dso_cache *cache;
 806
 807		cache = rb_entry(next, struct dso_cache, rb_node);
 808		next = rb_next(&cache->rb_node);
 809		rb_erase(&cache->rb_node, root);
 810		free(cache);
 811	}
 812	mutex_unlock(&dso->lock);
 813}
 814
 815static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 816{
 817	const struct rb_root *root = &dso->data.cache;
 818	struct rb_node * const *p = &root->rb_node;
 819	const struct rb_node *parent = NULL;
 820	struct dso_cache *cache;
 821
 822	while (*p != NULL) {
 823		u64 end;
 824
 825		parent = *p;
 826		cache = rb_entry(parent, struct dso_cache, rb_node);
 827		end = cache->offset + DSO__DATA_CACHE_SIZE;
 828
 829		if (offset < cache->offset)
 830			p = &(*p)->rb_left;
 831		else if (offset >= end)
 832			p = &(*p)->rb_right;
 833		else
 834			return cache;
 835	}
 836
 837	return NULL;
 838}
 839
 840static struct dso_cache *
 841dso_cache__insert(struct dso *dso, struct dso_cache *new)
 842{
 843	struct rb_root *root = &dso->data.cache;
 844	struct rb_node **p = &root->rb_node;
 845	struct rb_node *parent = NULL;
 846	struct dso_cache *cache;
 847	u64 offset = new->offset;
 848
 849	mutex_lock(&dso->lock);
 850	while (*p != NULL) {
 851		u64 end;
 852
 853		parent = *p;
 854		cache = rb_entry(parent, struct dso_cache, rb_node);
 855		end = cache->offset + DSO__DATA_CACHE_SIZE;
 856
 857		if (offset < cache->offset)
 858			p = &(*p)->rb_left;
 859		else if (offset >= end)
 860			p = &(*p)->rb_right;
 861		else
 862			goto out;
 863	}
 864
 865	rb_link_node(&new->rb_node, parent, p);
 866	rb_insert_color(&new->rb_node, root);
 867
 868	cache = NULL;
 869out:
 870	mutex_unlock(&dso->lock);
 871	return cache;
 872}
 873
 874static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
 875				 u64 size, bool out)
 876{
 877	u64 cache_offset = offset - cache->offset;
 878	u64 cache_size   = min(cache->size - cache_offset, size);
 879
 880	if (out)
 881		memcpy(data, cache->data + cache_offset, cache_size);
 882	else
 883		memcpy(cache->data + cache_offset, data, cache_size);
 884	return cache_size;
 885}
 886
 887static ssize_t file_read(struct dso *dso, struct machine *machine,
 888			 u64 offset, char *data)
 889{
 890	ssize_t ret;
 891
 892	pthread_mutex_lock(&dso__data_open_lock);
 893
 894	/*
 895	 * dso->data.fd might be closed if other thread opened another
 896	 * file (dso) due to open file limit (RLIMIT_NOFILE).
 897	 */
 898	try_to_open_dso(dso, machine);
 899
 900	if (dso->data.fd < 0) {
 901		dso->data.status = DSO_DATA_STATUS_ERROR;
 902		ret = -errno;
 903		goto out;
 904	}
 905
 906	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
 907out:
 908	pthread_mutex_unlock(&dso__data_open_lock);
 909	return ret;
 910}
 911
 912static struct dso_cache *dso_cache__populate(struct dso *dso,
 913					     struct machine *machine,
 914					     u64 offset, ssize_t *ret)
 915{
 916	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 917	struct dso_cache *cache;
 918	struct dso_cache *old;
 919
 920	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
 921	if (!cache) {
 922		*ret = -ENOMEM;
 923		return NULL;
 924	}
 925#ifdef HAVE_LIBBPF_SUPPORT
 926	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
 927		*ret = bpf_read(dso, cache_offset, cache->data);
 928	else
 929#endif
 930	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
 931		*ret = DSO__DATA_CACHE_SIZE;
 932	else
 933		*ret = file_read(dso, machine, cache_offset, cache->data);
 934
 935	if (*ret <= 0) {
 936		free(cache);
 937		return NULL;
 938	}
 939
 940	cache->offset = cache_offset;
 941	cache->size   = *ret;
 942
 943	old = dso_cache__insert(dso, cache);
 944	if (old) {
 945		/* we lose the race */
 946		free(cache);
 947		cache = old;
 948	}
 949
 950	return cache;
 951}
 952
 953static struct dso_cache *dso_cache__find(struct dso *dso,
 954					 struct machine *machine,
 955					 u64 offset,
 956					 ssize_t *ret)
 957{
 958	struct dso_cache *cache = __dso_cache__find(dso, offset);
 959
 960	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
 961}
 962
 963static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
 964			    u64 offset, u8 *data, ssize_t size, bool out)
 965{
 966	struct dso_cache *cache;
 967	ssize_t ret = 0;
 968
 969	cache = dso_cache__find(dso, machine, offset, &ret);
 970	if (!cache)
 971		return ret;
 972
 973	return dso_cache__memcpy(cache, offset, data, size, out);
 974}
 975
 976/*
 977 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
 978 * in the rb_tree. Any read to already cached data is served
 979 * by cached data. Writes update the cache only, not the backing file.
 980 */
 981static ssize_t cached_io(struct dso *dso, struct machine *machine,
 982			 u64 offset, u8 *data, ssize_t size, bool out)
 983{
 984	ssize_t r = 0;
 985	u8 *p = data;
 986
 987	do {
 988		ssize_t ret;
 989
 990		ret = dso_cache_io(dso, machine, offset, p, size, out);
 991		if (ret < 0)
 992			return ret;
 993
 994		/* Reached EOF, return what we have. */
 995		if (!ret)
 996			break;
 997
 998		BUG_ON(ret > size);
 999
1000		r      += ret;
1001		p      += ret;
1002		offset += ret;
1003		size   -= ret;
1004
1005	} while (size);
1006
1007	return r;
1008}
1009
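For example, assuming the 4096-byte DSO__DATA_CACHE_SIZE defined in dso.h and a file large enough to fill both chunks: a 100-byte read at offset 4090 takes two passes through dso_cache_io(). The first pass uses the chunk starting at offset 0 (4090 & DSO__DATA_CACHE_MASK) and copies the 6 bytes left in it; the loop then advances to offset 4096, where the second pass populates the next chunk and copies the remaining 94 bytes.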
1010static int file_size(struct dso *dso, struct machine *machine)
1011{
1012	int ret = 0;
1013	struct stat st;
1014	char sbuf[STRERR_BUFSIZE];
1015
1016	pthread_mutex_lock(&dso__data_open_lock);
1017
1018	/*
1019	 * dso->data.fd might be closed if other thread opened another
1020	 * file (dso) due to open file limit (RLIMIT_NOFILE).
1021	 */
1022	try_to_open_dso(dso, machine);
1023
1024	if (dso->data.fd < 0) {
1025		ret = -errno;
1026		dso->data.status = DSO_DATA_STATUS_ERROR;
1027		goto out;
1028	}
1029
1030	if (fstat(dso->data.fd, &st) < 0) {
1031		ret = -errno;
1032		pr_err("dso cache fstat failed: %s\n",
1033		       str_error_r(errno, sbuf, sizeof(sbuf)));
1034		dso->data.status = DSO_DATA_STATUS_ERROR;
1035		goto out;
1036	}
1037	dso->data.file_size = st.st_size;
1038
1039out:
1040	pthread_mutex_unlock(&dso__data_open_lock);
1041	return ret;
1042}
1043
1044int dso__data_file_size(struct dso *dso, struct machine *machine)
1045{
1046	if (dso->data.file_size)
1047		return 0;
1048
1049	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1050		return -1;
1051#ifdef HAVE_LIBBPF_SUPPORT
1052	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
1053		return bpf_size(dso);
1054#endif
1055	return file_size(dso, machine);
1056}
1057
1058/**
1059 * dso__data_size - Return dso data size
1060 * @dso: dso object
1061 * @machine: machine object
1062 *
1063 * Return: dso data size
1064 */
1065off_t dso__data_size(struct dso *dso, struct machine *machine)
1066{
1067	if (dso__data_file_size(dso, machine))
1068		return -1;
1069
1070	/* For now just estimate dso data size is close to file size */
1071	return dso->data.file_size;
1072}
1073
1074static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1075				      u64 offset, u8 *data, ssize_t size,
1076				      bool out)
1077{
1078	if (dso__data_file_size(dso, machine))
1079		return -1;
1080
1081	/* Check the offset sanity. */
1082	if (offset > dso->data.file_size)
1083		return -1;
1084
1085	if (offset + size < offset)
1086		return -1;
1087
1088	return cached_io(dso, machine, offset, data, size, out);
1089}
1090
1091/**
1092 * dso__data_read_offset - Read data from dso file offset
1093 * @dso: dso object
1094 * @machine: machine object
1095 * @offset: file offset
1096 * @data: buffer to store data
1097 * @size: size of the @data buffer
1098 *
1099 * External interface to read data from dso file offset. Open
1100 * dso data file and use cached_io() to get the data.
1101 */
1102ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1103			      u64 offset, u8 *data, ssize_t size)
1104{
1105	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1106		return -1;
1107
1108	return data_read_write_offset(dso, machine, offset, data, size, true);
1109}
1110
1111/**
1112 * dso__data_read_addr - Read data from dso address
1113 * @dso: dso object
1114 * @machine: machine object
1115 * @addr: virtual memory address
1116 * @data: buffer to store data
1117 * @size: size of the @data buffer
1118 *
1119 * External interface to read data from dso address.
1120 */
1121ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1122			    struct machine *machine, u64 addr,
1123			    u8 *data, ssize_t size)
1124{
1125	u64 offset = map->map_ip(map, addr);
1126	return dso__data_read_offset(dso, machine, offset, data, size);
1127}
1128
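A minimal read sketch (offset and buffer size are illustrative, error handling trimmed); both this helper and dso__data_read_offset() go through the chunk cache above instead of issuing a pread() per call:

	u8 buf[64];
	ssize_t n = dso__data_read_offset(dso, machine, 0x1000, buf, sizeof(buf));

	if (n > 0) {
		/* the first n bytes of buf are valid */
	}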
1129/**
1130 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1131 * @dso: dso object
1132 * @machine: machine object
1133 * @offset: file offset
1134 * @data: buffer to write
1135 * @size: size of the @data buffer
1136 *
1137 * Write into the dso file data cache, but do not change the file itself.
1138 */
1139ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1140				   u64 offset, const u8 *data_in, ssize_t size)
1141{
1142	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1143
1144	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1145		return -1;
1146
1147	return data_read_write_offset(dso, machine, offset, data, size, false);
1148}
1149
1150/**
1151 * dso__data_write_cache_addr - Write data to dso data cache at dso address
1152 * @dso: dso object
1153 * @machine: machine object
1154 * @addr: virtual memory address
1155 * @data: buffer to write
1156 * @size: size of the @data buffer
1157 *
1158 * External interface to write into the dso file data cache, but do not change
1159 * the file itself.
1160 */
1161ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1162				   struct machine *machine, u64 addr,
1163				   const u8 *data, ssize_t size)
1164{
1165	u64 offset = map->map_ip(map, addr);
1166	return dso__data_write_cache_offs(dso, machine, offset, data, size);
1167}
1168
1169struct map *dso__new_map(const char *name)
1170{
1171	struct map *map = NULL;
1172	struct dso *dso = dso__new(name);
1173
1174	if (dso) {
1175		map = map__new2(0, dso);
1176		dso__put(dso);
1177	}
1178
1179	return map;
1180}
1181
1182struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1183				    const char *short_name, int dso_type)
1184{
1185	/*
1186	 * The kernel dso could be created by build_id processing.
1187	 */
1188	struct dso *dso = machine__findnew_dso(machine, name);
1189
1190	/*
1191	 * We need to run this in all cases, since during the build_id
1192	 * processing we had no idea this was the kernel dso.
1193	 */
1194	if (dso != NULL) {
1195		dso__set_short_name(dso, short_name, false);
1196		dso->kernel = dso_type;
1197	}
1198
1199	return dso;
1200}
1201
1202static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
1203{
1204	struct rb_root *root = dso->root;
1205
1206	if (name == NULL)
1207		return;
1208
1209	if (dso->long_name_allocated)
1210		free((char *)dso->long_name);
1211
1212	if (root) {
1213		rb_erase(&dso->rb_node, root);
1214		/*
1215		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
1216		 * add it back, so a clean removal is required here.
1217		 */
1218		RB_CLEAR_NODE(&dso->rb_node);
1219		dso->root = NULL;
1220	}
1221
1222	dso->long_name		 = name;
1223	dso->long_name_len	 = strlen(name);
1224	dso->long_name_allocated = name_allocated;
1225
1226	if (root)
1227		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
1228}
1229
1230void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1231{
1232	dso__set_long_name_id(dso, name, NULL, name_allocated);
1233}
1234
1235void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1236{
1237	if (name == NULL)
1238		return;
1239
1240	if (dso->short_name_allocated)
1241		free((char *)dso->short_name);
1242
1243	dso->short_name		  = name;
1244	dso->short_name_len	  = strlen(name);
1245	dso->short_name_allocated = name_allocated;
1246}
1247
1248int dso__name_len(const struct dso *dso)
1249{
1250	if (!dso)
1251		return strlen("[unknown]");
1252	if (verbose > 0)
1253		return dso->long_name_len;
1254
1255	return dso->short_name_len;
1256}
1257
1258bool dso__loaded(const struct dso *dso)
1259{
1260	return dso->loaded;
1261}
1262
1263bool dso__sorted_by_name(const struct dso *dso)
1264{
1265	return dso->sorted_by_name;
1266}
1267
1268void dso__set_sorted_by_name(struct dso *dso)
1269{
1270	dso->sorted_by_name = true;
1271}
1272
1273struct dso *dso__new_id(const char *name, struct dso_id *id)
1274{
1275	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1276
1277	if (dso != NULL) {
1278		strcpy(dso->name, name);
1279		if (id)
1280			dso->id = *id;
1281		dso__set_long_name_id(dso, dso->name, id, false);
1282		dso__set_short_name(dso, dso->name, false);
1283		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1284		dso->data.cache = RB_ROOT;
1285		dso->inlined_nodes = RB_ROOT_CACHED;
1286		dso->srclines = RB_ROOT_CACHED;
1287		dso->data.fd = -1;
1288		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1289		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1290		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1291		dso->is_64_bit = (sizeof(void *) == 8);
1292		dso->loaded = 0;
1293		dso->rel = 0;
1294		dso->sorted_by_name = 0;
1295		dso->has_build_id = 0;
1296		dso->has_srcline = 1;
1297		dso->a2l_fails = 1;
1298		dso->kernel = DSO_SPACE__USER;
1299		dso->needs_swap = DSO_SWAP__UNSET;
1300		dso->comp = COMP_ID__NONE;
1301		RB_CLEAR_NODE(&dso->rb_node);
1302		dso->root = NULL;
1303		INIT_LIST_HEAD(&dso->node);
1304		INIT_LIST_HEAD(&dso->data.open_entry);
1305		mutex_init(&dso->lock);
1306		refcount_set(&dso->refcnt, 1);
1307	}
1308
1309	return dso;
1310}
1311
1312struct dso *dso__new(const char *name)
1313{
1314	return dso__new_id(name, NULL);
1315}
1316
1317void dso__delete(struct dso *dso)
1318{
1319	if (!RB_EMPTY_NODE(&dso->rb_node))
1320		pr_err("DSO %s is still in rbtree when being deleted!\n",
1321		       dso->long_name);
1322
1323	/* free inlines first, as they reference symbols */
1324	inlines__tree_delete(&dso->inlined_nodes);
1325	srcline__tree_delete(&dso->srclines);
1326	symbols__delete(&dso->symbols);
1327
1328	if (dso->short_name_allocated) {
1329		zfree((char **)&dso->short_name);
1330		dso->short_name_allocated = false;
1331	}
1332
1333	if (dso->long_name_allocated) {
1334		zfree((char **)&dso->long_name);
1335		dso->long_name_allocated = false;
1336	}
1337
1338	dso__data_close(dso);
1339	auxtrace_cache__free(dso->auxtrace_cache);
1340	dso_cache__free(dso);
1341	dso__free_a2l(dso);
1342	zfree(&dso->symsrc_filename);
1343	nsinfo__zput(dso->nsinfo);
1344	mutex_destroy(&dso->lock);
1345	free(dso);
1346}
1347
1348struct dso *dso__get(struct dso *dso)
1349{
1350	if (dso)
1351		refcount_inc(&dso->refcnt);
1352	return dso;
1353}
1354
1355void dso__put(struct dso *dso)
1356{
1357	if (dso && refcount_dec_and_test(&dso->refcnt))
1358		dso__delete(dso);
1359}
1360
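A brief note on the lifetime rules above: dso__new() returns an object with a reference count of one, and every additional holder is expected to take its own dso__get(). dso__new_map() earlier in this file follows this pattern, dropping its creation reference once the map holds one of its own. A minimal sketch with a hypothetical path:

	struct dso *dso = dso__new("/usr/bin/hypothetical");

	if (dso) {
		/* ... hand dso to a longer-lived owner that takes dso__get() ... */
		dso__put(dso);	/* drop the reference returned by dso__new() */
	}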
1361void dso__set_build_id(struct dso *dso, struct build_id *bid)
1362{
1363	dso->bid = *bid;
1364	dso->has_build_id = 1;
1365}
1366
1367bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1368{
1369	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
1370		/*
1371		 * For the backward compatibility, it allows a build-id has
1372		 * trailing zeros.
1373		 */
1374		return !memcmp(dso->bid.data, bid->data, bid->size) &&
1375			!memchr_inv(&dso->bid.data[bid->size], 0,
1376				    dso->bid.size - bid->size);
1377	}
1378
1379	return dso->bid.size == bid->size &&
1380	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
1381}
1382
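In other words, a worked example: if dso->bid is a full 20-byte (BUILD_ID_SIZE) id whose trailing four bytes are zero, it still matches an incoming 16-byte bid with the same first 16 bytes, thanks to the memchr_inv() check; any other size mismatch, or any differing byte, fails the comparison.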
1383void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1384{
1385	char path[PATH_MAX];
1386
1387	if (machine__is_default_guest(machine))
1388		return;
1389	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1390	if (sysfs__read_build_id(path, &dso->bid) == 0)
1391		dso->has_build_id = true;
1392}
1393
1394int dso__kernel_module_get_build_id(struct dso *dso,
1395				    const char *root_dir)
1396{
1397	char filename[PATH_MAX];
1398	/*
1399	 * kernel module short names are of the form "[module]" and
1400	 * we need just "module" here.
1401	 */
1402	const char *name = dso->short_name + 1;
1403
1404	snprintf(filename, sizeof(filename),
1405		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1406		 root_dir, (int)strlen(name) - 1, name);
1407
1408	if (sysfs__read_build_id(filename, &dso->bid) == 0)
1409		dso->has_build_id = true;
1410
1411	return 0;
1412}
1413
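A worked example of the sysfs path built above: with root_dir == "" and a module short name of "[nf_nat]", name points at "nf_nat]", the "%.*s" precision of strlen(name) - 1 drops the trailing ']', and the build id is read from "/sys/module/nf_nat/notes/.note.gnu.build-id".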
1414static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1415{
1416	char sbuild_id[SBUILD_ID_SIZE];
1417
1418	build_id__sprintf(&dso->bid, sbuild_id);
1419	return fprintf(fp, "%s", sbuild_id);
1420}
1421
1422size_t dso__fprintf(struct dso *dso, FILE *fp)
1423{
1424	struct rb_node *nd;
1425	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1426
1427	if (dso->short_name != dso->long_name)
1428		ret += fprintf(fp, "%s, ", dso->long_name);
1429	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1430	ret += dso__fprintf_buildid(dso, fp);
1431	ret += fprintf(fp, ")\n");
1432	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1433		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1434		ret += symbol__fprintf(pos, fp);
1435	}
1436
1437	return ret;
1438}
1439
1440enum dso_type dso__type(struct dso *dso, struct machine *machine)
1441{
1442	int fd;
1443	enum dso_type type = DSO__TYPE_UNKNOWN;
1444
1445	fd = dso__data_get_fd(dso, machine);
1446	if (fd >= 0) {
1447		type = dso__type_fd(fd);
1448		dso__data_put_fd(dso);
1449	}
1450
1451	return type;
1452}
1453
1454int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1455{
1456	int idx, errnum = dso->load_errno;
1457	/*
1458	 * This must have a same ordering as the enum dso_load_errno.
1459	 */
1460	static const char *dso_load__error_str[] = {
1461	"Internal tools/perf/ library error",
1462	"Invalid ELF file",
1463	"Can not read build id",
1464	"Mismatching build id",
1465	"Decompression failure",
1466	};
1467
1468	BUG_ON(buflen == 0);
1469
1470	if (errnum >= 0) {
1471		const char *err = str_error_r(errnum, buf, buflen);
1472
1473		if (err != buf)
1474			scnprintf(buf, buflen, "%s", err);
1475
1476		return 0;
1477	}
1478
1479	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1480		return -1;
1481
1482	idx = errnum - __DSO_LOAD_ERRNO__START;
1483	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1484	return 0;
1485}
v6.13.7 (tools/perf/util/dso.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <asm/bug.h>
   3#include <linux/kernel.h>
   4#include <linux/string.h>
   5#include <linux/zalloc.h>
   6#include <sys/time.h>
   7#include <sys/resource.h>
   8#include <sys/types.h>
   9#include <sys/stat.h>
  10#include <unistd.h>
  11#include <errno.h>
  12#include <fcntl.h>
  13#include <stdlib.h>
  14#ifdef HAVE_LIBBPF_SUPPORT
  15#include <bpf/libbpf.h>
  16#include "bpf-event.h"
  17#include "bpf-utils.h"
  18#endif
  19#include "compress.h"
  20#include "env.h"
  21#include "namespaces.h"
  22#include "path.h"
  23#include "map.h"
  24#include "symbol.h"
  25#include "srcline.h"
  26#include "dso.h"
  27#include "dsos.h"
  28#include "machine.h"
  29#include "auxtrace.h"
  30#include "util.h" /* O_CLOEXEC for older systems */
  31#include "debug.h"
  32#include "string2.h"
  33#include "vdso.h"
  34#include "annotate-data.h"
  35
  36static const char * const debuglink_paths[] = {
  37	"%.0s%s",
  38	"%s/%s",
  39	"%s/.debug/%s",
  40	"/usr/lib/debug%s/%s"
  41};
  42
  43void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
  44{
  45	nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
  46	RC_CHK_ACCESS(dso)->nsinfo = nsi;
  47}
  48
  49char dso__symtab_origin(const struct dso *dso)
  50{
  51	static const char origin[] = {
  52		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
  53		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
  54		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
  55		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
  56		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
  57		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
  58		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
  59		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
  60		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
  61		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
  62		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
  63		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
  64		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
  65		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
  66		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
  67		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
  68		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
  69		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
  70	};
  71
  72	if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
  73		return '!';
  74	return origin[dso__symtab_type(dso)];
  75}
  76
  77bool dso__is_object_file(const struct dso *dso)
  78{
  79	switch (dso__binary_type(dso)) {
  80	case DSO_BINARY_TYPE__KALLSYMS:
  81	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
  82	case DSO_BINARY_TYPE__JAVA_JIT:
  83	case DSO_BINARY_TYPE__BPF_PROG_INFO:
  84	case DSO_BINARY_TYPE__BPF_IMAGE:
  85	case DSO_BINARY_TYPE__OOL:
  86		return false;
  87	case DSO_BINARY_TYPE__VMLINUX:
  88	case DSO_BINARY_TYPE__GUEST_VMLINUX:
  89	case DSO_BINARY_TYPE__DEBUGLINK:
  90	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
  91	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
  92	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
  93	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
  94	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
  95	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
  96	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
  97	case DSO_BINARY_TYPE__GUEST_KMODULE:
  98	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
  99	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
 100	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 101	case DSO_BINARY_TYPE__KCORE:
 102	case DSO_BINARY_TYPE__GUEST_KCORE:
 103	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
 104	case DSO_BINARY_TYPE__NOT_FOUND:
 105	default:
 106		return true;
 107	}
 108}
 109
 110int dso__read_binary_type_filename(const struct dso *dso,
 111				   enum dso_binary_type type,
 112				   char *root_dir, char *filename, size_t size)
 113{
 114	char build_id_hex[SBUILD_ID_SIZE];
 115	int ret = 0;
 116	size_t len;
 117
 118	switch (type) {
 119	case DSO_BINARY_TYPE__DEBUGLINK:
 120	{
 121		const char *last_slash;
 122		char dso_dir[PATH_MAX];
 123		char symfile[PATH_MAX];
 124		unsigned int i;
 125
 126		len = __symbol__join_symfs(filename, size, dso__long_name(dso));
 127		last_slash = filename + len;
 128		while (last_slash != filename && *last_slash != '/')
 129			last_slash--;
 130
 131		strncpy(dso_dir, filename, last_slash - filename);
 132		dso_dir[last_slash-filename] = '\0';
 133
 134		if (!is_regular_file(filename)) {
 135			ret = -1;
 136			break;
 137		}
 138
 139		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
 140		if (ret)
 141			break;
 142
 143		/* Check predefined locations where debug file might reside */
 144		ret = -1;
 145		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
 146			snprintf(filename, size,
 147					debuglink_paths[i], dso_dir, symfile);
 148			if (is_regular_file(filename)) {
 149				ret = 0;
 150				break;
 151			}
 152		}
 153
 154		break;
 155	}
 156	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
 157		if (dso__build_id_filename(dso, filename, size, false) == NULL)
 158			ret = -1;
 159		break;
 160
 161	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
 162		if (dso__build_id_filename(dso, filename, size, true) == NULL)
 163			ret = -1;
 164		break;
 165
 166	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
 167		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 168		snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
 169		break;
 170
 171	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
 172		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 173		snprintf(filename + len, size - len, "%s", dso__long_name(dso));
 174		break;
 175
 176	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
 177		/*
 178		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
 179		 * /usr/lib/debug/lib when it is expected to be in
 180		 * /usr/lib/debug/usr/lib
 181		 */
 182		if (strlen(dso__long_name(dso)) < 9 ||
 183		    strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
 184			ret = -1;
 185			break;
 186		}
 187		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
 188		snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
 189		break;
 190
 191	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
 192	{
 193		const char *last_slash;
 194		size_t dir_size;
 195
 196		last_slash = dso__long_name(dso) + dso__long_name_len(dso);
 197		while (last_slash != dso__long_name(dso) && *last_slash != '/')
 198			last_slash--;
 199
 200		len = __symbol__join_symfs(filename, size, "");
 201		dir_size = last_slash - dso__long_name(dso) + 2;
 202		if (dir_size > (size - len)) {
 203			ret = -1;
 204			break;
 205		}
 206		len += scnprintf(filename + len, dir_size, "%s",  dso__long_name(dso));
 207		len += scnprintf(filename + len , size - len, ".debug%s",
 208								last_slash);
 209		break;
 210	}
 211
 212	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
 213		if (!dso__has_build_id(dso)) {
 214			ret = -1;
 215			break;
 216		}
 217
 218		build_id__sprintf(dso__bid_const(dso), build_id_hex);
 219		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
 220		snprintf(filename + len, size - len, "%.2s/%s.debug",
 221			 build_id_hex, build_id_hex + 2);
 222		break;
 223
 224	case DSO_BINARY_TYPE__VMLINUX:
 225	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 226	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
 227		__symbol__join_symfs(filename, size, dso__long_name(dso));
 228		break;
 229
 230	case DSO_BINARY_TYPE__GUEST_KMODULE:
 231	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
 232		path__join3(filename, size, symbol_conf.symfs,
 233			    root_dir, dso__long_name(dso));
 234		break;
 235
 236	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
 237	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
 238		__symbol__join_symfs(filename, size, dso__long_name(dso));
 239		break;
 240
 241	case DSO_BINARY_TYPE__KCORE:
 242	case DSO_BINARY_TYPE__GUEST_KCORE:
 243		snprintf(filename, size, "%s", dso__long_name(dso));
 244		break;
 245
 246	default:
 247	case DSO_BINARY_TYPE__KALLSYMS:
 248	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
 249	case DSO_BINARY_TYPE__JAVA_JIT:
 250	case DSO_BINARY_TYPE__BPF_PROG_INFO:
 251	case DSO_BINARY_TYPE__BPF_IMAGE:
 252	case DSO_BINARY_TYPE__OOL:
 253	case DSO_BINARY_TYPE__NOT_FOUND:
 254		ret = -1;
 255		break;
 256	}
 257
 258	return ret;
 259}
 260
 261enum {
 262	COMP_ID__NONE = 0,
 263};
 264
 265static const struct {
 266	const char *fmt;
 267	int (*decompress)(const char *input, int output);
 268	bool (*is_compressed)(const char *input);
 269} compressions[] = {
 270	[COMP_ID__NONE] = { .fmt = NULL, },
 271#ifdef HAVE_ZLIB_SUPPORT
 272	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
 273#endif
 274#ifdef HAVE_LZMA_SUPPORT
 275	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
 276#endif
 277	{ NULL, NULL, NULL },
 278};
 279
 280static int is_supported_compression(const char *ext)
 281{
 282	unsigned i;
 283
 284	for (i = 1; compressions[i].fmt; i++) {
 285		if (!strcmp(ext, compressions[i].fmt))
 286			return i;
 287	}
 288	return COMP_ID__NONE;
 289}
 290
 291bool is_kernel_module(const char *pathname, int cpumode)
 292{
 293	struct kmod_path m;
 294	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
 295
 296	WARN_ONCE(mode != cpumode,
 297		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
 298		  cpumode);
 299
 300	switch (mode) {
 301	case PERF_RECORD_MISC_USER:
 302	case PERF_RECORD_MISC_HYPERVISOR:
 303	case PERF_RECORD_MISC_GUEST_USER:
 304		return false;
 305	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
 306	default:
 307		if (kmod_path__parse(&m, pathname)) {
 308			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
 309					pathname);
 310			return true;
 311		}
 312	}
 313
 314	return m.kmod;
 315}
 316
 317bool dso__needs_decompress(struct dso *dso)
 318{
 319	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
 320		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 321}
 322
 323int filename__decompress(const char *name, char *pathname,
 324			 size_t len, int comp, int *err)
 325{
 326	char tmpbuf[] = KMOD_DECOMP_NAME;
 327	int fd = -1;
 328
 329	/*
 330	 * We have proper compression id for DSO and yet the file
 331	 * behind the 'name' can still be plain uncompressed object.
 332	 *
 333	 * The reason lies in the logic we use to open DSO object files:
 334	 * we try all possible 'debug' objects until we find the
 335	 * data. So even if the DSO is represented by the 'krava.xz' module,
 336	 * we can end up here opening a '~/.debug/....23432432/debug' file
 337	 * which is not compressed.
 338	 *
 339	 * To keep this transparent, we detect this and return the file
 340	 * descriptor to the uncompressed file.
 341	 */
 342	if (!compressions[comp].is_compressed(name))
 343		return open(name, O_RDONLY);
 344
 345	fd = mkstemp(tmpbuf);
 346	if (fd < 0) {
 347		*err = errno;
 348		return -1;
 349	}
 350
 351	if (compressions[comp].decompress(name, fd)) {
 352		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
 353		close(fd);
 354		fd = -1;
 355	}
 356
 357	if (!pathname || (fd < 0))
 358		unlink(tmpbuf);
 359
 360	if (pathname && (fd >= 0))
 361		strlcpy(pathname, tmpbuf, len);
 362
 363	return fd;
 364}
 365
 366static int decompress_kmodule(struct dso *dso, const char *name,
 367			      char *pathname, size_t len)
 368{
 369	if (!dso__needs_decompress(dso))
 370		return -1;
 371
 372	if (dso__comp(dso) == COMP_ID__NONE)
 373		return -1;
 374
 375	return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso));
 376}
 377
 378int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
 379{
 380	return decompress_kmodule(dso, name, NULL, 0);
 381}
 382
 383int dso__decompress_kmodule_path(struct dso *dso, const char *name,
 384				 char *pathname, size_t len)
 385{
 386	int fd = decompress_kmodule(dso, name, pathname, len);
 387
 388	close(fd);
 389	return fd >= 0 ? 0 : -1;
 390}
 391
 392/*
 393 * Parses kernel module specified in @path and updates
 394 * @m argument like:
 395 *
 396 *    @comp - true if @path contains supported compression suffix,
 397 *            false otherwise
 398 *    @kmod - true if @path contains '.ko' suffix in right position,
 399 *            false otherwise
 400 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 401 *            of the kernel module without suffixes, otherwise strudup-ed
 402 *            base name of @path
 403 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 404 *            the compression suffix
 405 *
 406 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 407 */
 408int __kmod_path__parse(struct kmod_path *m, const char *path,
 409		       bool alloc_name)
 410{
 411	const char *name = strrchr(path, '/');
 412	const char *ext  = strrchr(path, '.');
 413	bool is_simple_name = false;
 414
 415	memset(m, 0x0, sizeof(*m));
 416	name = name ? name + 1 : path;
 417
 418	/*
 419	 * '.' is also a valid character for module name. For example:
 420	 * [aaa.bbb] is a valid module name. '[' should have higher
 421	 * priority than '.ko' suffix.
 422	 *
 423	 * The kernel names are from machine__mmap_name. Such
 424	 * name should belong to kernel itself, not kernel module.
 425	 */
 426	if (name[0] == '[') {
 427		is_simple_name = true;
 428		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
 429		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
 430		    (strncmp(name, "[vdso]", 6) == 0) ||
 431		    (strncmp(name, "[vdso32]", 8) == 0) ||
 432		    (strncmp(name, "[vdsox32]", 9) == 0) ||
 433		    (strncmp(name, "[vsyscall]", 10) == 0)) {
 434			m->kmod = false;
 435
 436		} else
 437			m->kmod = true;
 438	}
 439
 440	/* No extension, just return name. */
 441	if ((ext == NULL) || is_simple_name) {
 442		if (alloc_name) {
 443			m->name = strdup(name);
 444			return m->name ? 0 : -ENOMEM;
 445		}
 446		return 0;
 447	}
 448
 449	m->comp = is_supported_compression(ext + 1);
 450	if (m->comp > COMP_ID__NONE)
 451		ext -= 3;
 452
 453	/* Check .ko extension only if there's enough name left. */
 454	if (ext > name)
 455		m->kmod = !strncmp(ext, ".ko", 3);
 456
 457	if (alloc_name) {
 458		if (m->kmod) {
 459			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
 460				return -ENOMEM;
 461		} else {
 462			if (asprintf(&m->name, "%s", name) == -1)
 463				return -ENOMEM;
 464		}
 465
 466		strreplace(m->name, '-', '_');
 467	}
 468
 469	return 0;
 470}
 471
 472void dso__set_module_info(struct dso *dso, struct kmod_path *m,
 473			  struct machine *machine)
 474{
 475	if (machine__is_host(machine))
 476		dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
 477	else
 478		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);
 479
 480	/* _KMODULE_COMP should be next to _KMODULE */
 481	if (m->kmod && m->comp) {
 482		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
 483		dso__set_comp(dso, m->comp);
 484	}
 485
 486	dso__set_is_kmod(dso);
 487	dso__set_short_name(dso, strdup(m->name), true);
 488}
 489
 490/*
 491 * Global list of open DSOs and the counter.
 492 */
 493static LIST_HEAD(dso__data_open);
 494static long dso__data_open_cnt;
 495static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
 496
 497static void dso__list_add(struct dso *dso)
 498{
 499	list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
 500#ifdef REFCNT_CHECKING
 501	dso__data(dso)->dso = dso__get(dso);
 502#endif
 503	/* Assume the dso is part of dsos, hence the optional reference count above. */
 504	assert(dso__dsos(dso));
 505	dso__data_open_cnt++;
 506}
 507
 508static void dso__list_del(struct dso *dso)
 509{
 510	list_del_init(&dso__data(dso)->open_entry);
 511#ifdef REFCNT_CHECKING
 512	dso__put(dso__data(dso)->dso);
 513#endif
 514	WARN_ONCE(dso__data_open_cnt <= 0,
 515		  "DSO data fd counter out of bounds.");
 516	dso__data_open_cnt--;
 517}
 518
 519static void close_first_dso(void);
 520
 521static int do_open(char *name)
 522{
 523	int fd;
 524	char sbuf[STRERR_BUFSIZE];
 525
 526	do {
 527		fd = open(name, O_RDONLY|O_CLOEXEC);
 528		if (fd >= 0)
 529			return fd;
 530
 531		pr_debug("dso open failed: %s\n",
 532			 str_error_r(errno, sbuf, sizeof(sbuf)));
 533		if (!dso__data_open_cnt || errno != EMFILE)
 534			break;
 535
 536		close_first_dso();
 537	} while (1);
 538
 539	return -1;
 540}
 541
 542char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
 543{
 544	return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
 545}
 546
 547static int __open_dso(struct dso *dso, struct machine *machine)
 548{
 549	int fd = -EINVAL;
 550	char *root_dir = (char *)"";
 551	char *name = malloc(PATH_MAX);
 552	bool decomp = false;
 553
 554	if (!name)
 555		return -ENOMEM;
 556
 557	mutex_lock(dso__lock(dso));
 558	if (machine)
 559		root_dir = machine->root_dir;
 560
 561	if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
 562					    root_dir, name, PATH_MAX))
 563		goto out;
 564
 565	if (!is_regular_file(name)) {
 566		char *new_name;
 567
 568		if (errno != ENOENT || dso__nsinfo(dso) == NULL)
 569			goto out;
 570
 571		new_name = dso__filename_with_chroot(dso, name);
 572		if (!new_name)
 573			goto out;
 574
 575		free(name);
 576		name = new_name;
 577	}
 578
 579	if (dso__needs_decompress(dso)) {
 580		char newpath[KMOD_DECOMP_LEN];
 581		size_t len = sizeof(newpath);
 582
 583		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
 584			fd = -(*dso__load_errno(dso));
 585			goto out;
 586		}
 587
 588		decomp = true;
 589		strcpy(name, newpath);
 590	}
 591
 592	fd = do_open(name);
 593
 594	if (decomp)
 595		unlink(name);
 596
 597out:
 598	mutex_unlock(dso__lock(dso));
 599	free(name);
 600	return fd;
 601}
 602
 603static void check_data_close(void);
 604
 605/**
 606 * open_dso - Open DSO data file
 607 * @dso: dso object
 608 *
 609 * Opens @dso's data file descriptor and updates the
 610 * list/count of open DSO objects.
 611 */
 612static int open_dso(struct dso *dso, struct machine *machine)
 613{
 614	int fd;
 615	struct nscookie nsc;
 616
 617	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
 618		mutex_lock(dso__lock(dso));
 619		nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
 620		mutex_unlock(dso__lock(dso));
 621	}
 622	fd = __open_dso(dso, machine);
 623	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
 624		nsinfo__mountns_exit(&nsc);
 625
 626	if (fd >= 0) {
 627		dso__list_add(dso);
 628		/*
 629		 * Check if we crossed the allowed number
 630		 * of opened DSOs and close one if needed.
 631		 */
 632		check_data_close();
 633	}
 634
 635	return fd;
 636}
 637
 638static void close_data_fd(struct dso *dso)
 639{
 640	if (dso__data(dso)->fd >= 0) {
 641		close(dso__data(dso)->fd);
 642		dso__data(dso)->fd = -1;
 643		dso__data(dso)->file_size = 0;
 644		dso__list_del(dso);
 645	}
 646}
 647
 648/**
 649 * close_dso - Close DSO data file
 650 * @dso: dso object
 651 *
 652 * Closes @dso's data file descriptor and updates the
 653 * list/count of open DSO objects.
 654 */
 655static void close_dso(struct dso *dso)
 656{
 657	close_data_fd(dso);
 658}
 659
 660static void close_first_dso(void)
 661{
 662	struct dso_data *dso_data;
 663	struct dso *dso;
 664
 665	dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
 666#ifdef REFCNT_CHECKING
 667	dso = dso_data->dso;
 668#else
 669	dso = container_of(dso_data, struct dso, data);
 670#endif
 671	close_dso(dso);
 672}
 673
 674static rlim_t get_fd_limit(void)
 675{
 676	struct rlimit l;
 677	rlim_t limit = 0;
 678
 679	/* Allow half of the current open fd limit. */
 680	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
 681		if (l.rlim_cur == RLIM_INFINITY)
 682			limit = l.rlim_cur;
 683		else
 684			limit = l.rlim_cur / 2;
 685	} else {
 686		pr_err("failed to get fd limit\n");
 687		limit = 1;
 688	}
 689
 690	return limit;
 691}
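
/*
 * Standalone sketch (not part of dso.c) of the same RLIMIT_NOFILE
 * calculation get_fd_limit() performs: cache at most half of the soft
 * fd limit, or everything when the limit is unlimited. Buildable with
 * just libc.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0) {
		perror("getrlimit");
		return 1;
	}
	if (l.rlim_cur == RLIM_INFINITY)
		printf("no fd limit, cache all dso fds\n");
	else
		printf("cache at most %llu dso fds\n",
		       (unsigned long long)(l.rlim_cur / 2));
	return 0;
}
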
 692
 693static rlim_t fd_limit;
 694
 695/*
 696 * Used only by tests/dso-data.c to reset the environment
 697 * for tests. This is not expected to change during a
 698 * standard run.
 699 */
 700void reset_fd_limit(void)
 701{
 702	fd_limit = 0;
 703}
 704
 705static bool may_cache_fd(void)
 706{
 707	if (!fd_limit)
 708		fd_limit = get_fd_limit();
 709
 710	if (fd_limit == RLIM_INFINITY)
 711		return true;
 712
 713	return fd_limit > (rlim_t) dso__data_open_cnt;
 714}
 715
 716/*
 717 * Check and close the LRU dso if we crossed the allowed limit
 718 * of open dso file descriptors. The limit is half of the
 719 * RLIMIT_NOFILE soft limit.
 720 */
 721static void check_data_close(void)
 722{
 723	bool cache_fd = may_cache_fd();
 724
 725	if (!cache_fd)
 726		close_first_dso();
 727}
 728
 729/**
 730 * dso__data_close - Close DSO data file
 731 * @dso: dso object
 732 *
 733 * External interface to close @dso's data file descriptor.
 734 */
 735void dso__data_close(struct dso *dso)
 736{
 737	pthread_mutex_lock(&dso__data_open_lock);
 738	close_dso(dso);
 739	pthread_mutex_unlock(&dso__data_open_lock);
 740}
 741
 742static void try_to_open_dso(struct dso *dso, struct machine *machine)
 743{
 744	enum dso_binary_type binary_type_data[] = {
 745		DSO_BINARY_TYPE__BUILD_ID_CACHE,
 746		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
 747		DSO_BINARY_TYPE__NOT_FOUND,
 748	};
 749	int i = 0;
 750	struct dso_data *dso_data = dso__data(dso);
 751
 752	if (dso_data->fd >= 0)
 753		return;
 754
 755	if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
 756		dso_data->fd = open_dso(dso, machine);
 757		goto out;
 758	}
 759
 760	do {
 761		dso__set_binary_type(dso, binary_type_data[i++]);
 762
 763		dso_data->fd = open_dso(dso, machine);
 764		if (dso_data->fd >= 0)
 765			goto out;
 766
 767	} while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
 768out:
 769	if (dso_data->fd >= 0)
 770		dso_data->status = DSO_DATA_STATUS_OK;
 771	else
 772		dso_data->status = DSO_DATA_STATUS_ERROR;
 773}
 774
 775/**
 776 * dso__data_get_fd - Get dso's data file descriptor
 777 * @dso: dso object
 778 * @machine: machine object
 779 *
 780 * External interface to find the dso's file, open it and
 781 * return its file descriptor. It must be paired with
 782 * dso__data_put_fd() if it returns a non-negative value.
 783 */
 784int dso__data_get_fd(struct dso *dso, struct machine *machine)
 785{
 786	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
 787		return -1;
 788
 789	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
 790		return -1;
 791
 792	try_to_open_dso(dso, machine);
 793
 794	if (dso__data(dso)->fd < 0)
 795		pthread_mutex_unlock(&dso__data_open_lock);
 796
 797	return dso__data(dso)->fd;
 798}
 799
 800void dso__data_put_fd(struct dso *dso __maybe_unused)
 801{
 802	pthread_mutex_unlock(&dso__data_open_lock);
 803}
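
/*
 * Illustrative sketch (not part of dso.c): pairing dso__data_get_fd()
 * with dso__data_put_fd(). The returned fd is only protected while the
 * lock taken by the get is held, i.e. until the matching put;
 * dso__type() further down in this file follows the same pattern.
 * The fstat() use case is an assumption for illustration.
 */
static off_t example_dso_file_size(struct dso *dso, struct machine *machine)
{
	struct stat st;
	off_t size = -1;
	int fd = dso__data_get_fd(dso, machine);

	if (fd >= 0) {
		if (fstat(fd, &st) == 0)
			size = st.st_size;
		dso__data_put_fd(dso);	/* releases dso__data_open_lock */
	}
	return size;
}
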
 804
 805bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 806{
 807	u32 flag = 1 << by;
 808
 809	if (dso__data(dso)->status_seen & flag)
 810		return true;
 811
 812	dso__data(dso)->status_seen |= flag;
 813
 814	return false;
 815}
 816
 817#ifdef HAVE_LIBBPF_SUPPORT
 818static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
 819{
 820	struct bpf_prog_info_node *node;
 821	ssize_t size = DSO__DATA_CACHE_SIZE;
 822	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
 823	u64 len;
 824	u8 *buf;
 825
 826	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
 827	if (!node || !node->info_linear) {
 828		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
 829		return -1;
 830	}
 831
 832	len = node->info_linear->info.jited_prog_len;
 833	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
 834
 835	if (offset >= len)
 836		return -1;
 837
 838	size = (ssize_t)min(len - offset, (u64)size);
 839	memcpy(data, buf + offset, size);
 840	return size;
 841}
 842
 843static int bpf_size(struct dso *dso)
 844{
 845	struct bpf_prog_info_node *node;
 846	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
 847
 848	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
 849	if (!node || !node->info_linear) {
 850		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
 851		return -1;
 852	}
 853
 854	dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
 855	return 0;
 856}
 857#endif // HAVE_LIBBPF_SUPPORT
 858
 859static void
 860dso_cache__free(struct dso *dso)
 861{
 862	struct rb_root *root = &dso__data(dso)->cache;
 863	struct rb_node *next = rb_first(root);
 864
 865	mutex_lock(dso__lock(dso));
 866	while (next) {
 867		struct dso_cache *cache;
 868
 869		cache = rb_entry(next, struct dso_cache, rb_node);
 870		next = rb_next(&cache->rb_node);
 871		rb_erase(&cache->rb_node, root);
 872		free(cache);
 873	}
 874	mutex_unlock(dso__lock(dso));
 875}
 876
 877static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 878{
 879	const struct rb_root *root = &dso__data(dso)->cache;
 880	struct rb_node * const *p = &root->rb_node;
 881	const struct rb_node *parent = NULL;
 882	struct dso_cache *cache;
 883
 884	while (*p != NULL) {
 885		u64 end;
 886
 887		parent = *p;
 888		cache = rb_entry(parent, struct dso_cache, rb_node);
 889		end = cache->offset + DSO__DATA_CACHE_SIZE;
 890
 891		if (offset < cache->offset)
 892			p = &(*p)->rb_left;
 893		else if (offset >= end)
 894			p = &(*p)->rb_right;
 895		else
 896			return cache;
 897	}
 898
 899	return NULL;
 900}
 901
 902static struct dso_cache *
 903dso_cache__insert(struct dso *dso, struct dso_cache *new)
 904{
 905	struct rb_root *root = &dso__data(dso)->cache;
 906	struct rb_node **p = &root->rb_node;
 907	struct rb_node *parent = NULL;
 908	struct dso_cache *cache;
 909	u64 offset = new->offset;
 910
 911	mutex_lock(dso__lock(dso));
 912	while (*p != NULL) {
 913		u64 end;
 914
 915		parent = *p;
 916		cache = rb_entry(parent, struct dso_cache, rb_node);
 917		end = cache->offset + DSO__DATA_CACHE_SIZE;
 918
 919		if (offset < cache->offset)
 920			p = &(*p)->rb_left;
 921		else if (offset >= end)
 922			p = &(*p)->rb_right;
 923		else
 924			goto out;
 925	}
 926
 927	rb_link_node(&new->rb_node, parent, p);
 928	rb_insert_color(&new->rb_node, root);
 929
 930	cache = NULL;
 931out:
 932	mutex_unlock(dso__lock(dso));
 933	return cache;
 934}
 935
 936static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
 937				 u64 size, bool out)
 938{
 939	u64 cache_offset = offset - cache->offset;
 940	u64 cache_size   = min(cache->size - cache_offset, size);
 941
 942	if (out)
 943		memcpy(data, cache->data + cache_offset, cache_size);
 944	else
 945		memcpy(cache->data + cache_offset, data, cache_size);
 946	return cache_size;
 947}
 948
 949static ssize_t file_read(struct dso *dso, struct machine *machine,
 950			 u64 offset, char *data)
 951{
 952	ssize_t ret;
 953
 954	pthread_mutex_lock(&dso__data_open_lock);
 955
 956	/*
 957	 * dso__data(dso)->fd might be closed if another thread opened another
 958	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
 959	 */
 960	try_to_open_dso(dso, machine);
 961
 962	if (dso__data(dso)->fd < 0) {
 963		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
 964		ret = -errno;
 965		goto out;
 966	}
 967
 968	ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
 969out:
 970	pthread_mutex_unlock(&dso__data_open_lock);
 971	return ret;
 972}
 973
 974static struct dso_cache *dso_cache__populate(struct dso *dso,
 975					     struct machine *machine,
 976					     u64 offset, ssize_t *ret)
 977{
 978	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 979	struct dso_cache *cache;
 980	struct dso_cache *old;
 981
 982	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
 983	if (!cache) {
 984		*ret = -ENOMEM;
 985		return NULL;
 986	}
 987#ifdef HAVE_LIBBPF_SUPPORT
 988	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
 989		*ret = bpf_read(dso, cache_offset, cache->data);
 990	else
 991#endif
 992	if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
 993		*ret = DSO__DATA_CACHE_SIZE;
 994	else
 995		*ret = file_read(dso, machine, cache_offset, cache->data);
 996
 997	if (*ret <= 0) {
 998		free(cache);
 999		return NULL;
1000	}
1001
1002	cache->offset = cache_offset;
1003	cache->size   = *ret;
1004
1005	old = dso_cache__insert(dso, cache);
1006	if (old) {
1007		/* we lose the race */
1008		free(cache);
1009		cache = old;
1010	}
1011
1012	return cache;
1013}
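
/*
 * Worked example (illustration only) of the chunk arithmetic used
 * above, assuming DSO__DATA_CACHE_SIZE is a power of two (4096 at the
 * time of writing) and DSO__DATA_CACHE_MASK is ~(DSO__DATA_CACHE_SIZE - 1).
 */
static void example_cache_chunk_arithmetic(void)
{
	const u64 cache_size = 4096;		/* assumed chunk size */
	const u64 cache_mask = ~(cache_size - 1);
	u64 offset = 0x1234;			/* arbitrary file offset */
	u64 chunk_start = offset & cache_mask;	/* 0x1000 */
	u64 in_chunk = offset - chunk_start;	/* 0x234, see dso_cache__memcpy() */

	pr_debug("chunk %#llx, offset within chunk %#llx\n",
		 (unsigned long long)chunk_start,
		 (unsigned long long)in_chunk);
}
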
1014
1015static struct dso_cache *dso_cache__find(struct dso *dso,
1016					 struct machine *machine,
1017					 u64 offset,
1018					 ssize_t *ret)
1019{
1020	struct dso_cache *cache = __dso_cache__find(dso, offset);
1021
1022	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
1023}
1024
1025static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
1026			    u64 offset, u8 *data, ssize_t size, bool out)
1027{
1028	struct dso_cache *cache;
1029	ssize_t ret = 0;
1030
1031	cache = dso_cache__find(dso, machine, offset, &ret);
1032	if (!cache)
1033		return ret;
1034
1035	return dso_cache__memcpy(cache, offset, data, size, out);
1036}
1037
1038/*
1039 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
1040 * in the rb_tree. Any read of already cached data is served from the
1041 * cache. Writes update the cache only, not the backing file.
1042 */
1043static ssize_t cached_io(struct dso *dso, struct machine *machine,
1044			 u64 offset, u8 *data, ssize_t size, bool out)
1045{
1046	ssize_t r = 0;
1047	u8 *p = data;
1048
1049	do {
1050		ssize_t ret;
1051
1052		ret = dso_cache_io(dso, machine, offset, p, size, out);
1053		if (ret < 0)
1054			return ret;
1055
1056		/* Reached EOF, return what we have. */
1057		if (!ret)
1058			break;
1059
1060		BUG_ON(ret > size);
1061
1062		r      += ret;
1063		p      += ret;
1064		offset += ret;
1065		size   -= ret;
1066
1067	} while (size);
1068
1069	return r;
1070}
1071
1072static int file_size(struct dso *dso, struct machine *machine)
1073{
1074	int ret = 0;
1075	struct stat st;
1076	char sbuf[STRERR_BUFSIZE];
1077
1078	pthread_mutex_lock(&dso__data_open_lock);
1079
1080	/*
1081	 * dso__data(dso)->fd might be closed if another thread opened another
1082	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
1083	 */
1084	try_to_open_dso(dso, machine);
1085
1086	if (dso__data(dso)->fd < 0) {
1087		ret = -errno;
1088		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
1089		goto out;
1090	}
1091
1092	if (fstat(dso__data(dso)->fd, &st) < 0) {
1093		ret = -errno;
1094		pr_err("dso cache fstat failed: %s\n",
1095		       str_error_r(errno, sbuf, sizeof(sbuf)));
1096		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
1097		goto out;
1098	}
1099	dso__data(dso)->file_size = st.st_size;
1100
1101out:
1102	pthread_mutex_unlock(&dso__data_open_lock);
1103	return ret;
1104}
1105
1106int dso__data_file_size(struct dso *dso, struct machine *machine)
1107{
1108	if (dso__data(dso)->file_size)
1109		return 0;
1110
1111	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1112		return -1;
1113#ifdef HAVE_LIBBPF_SUPPORT
1114	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
1115		return bpf_size(dso);
1116#endif
1117	return file_size(dso, machine);
1118}
1119
1120/**
1121 * dso__data_size - Return dso data size
1122 * @dso: dso object
1123 * @machine: machine object
1124 *
1125 * Return: dso data size
1126 */
1127off_t dso__data_size(struct dso *dso, struct machine *machine)
1128{
1129	if (dso__data_file_size(dso, machine))
1130		return -1;
1131
1132	/* For now just assume the dso data size is close to the file size */
1133	return dso__data(dso)->file_size;
1134}
1135
1136static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1137				      u64 offset, u8 *data, ssize_t size,
1138				      bool out)
1139{
1140	if (dso__data_file_size(dso, machine))
1141		return -1;
1142
1143	/* Check the offset sanity. */
1144	if (offset > dso__data(dso)->file_size)
1145		return -1;
1146
1147	if (offset + size < offset)
1148		return -1;
1149
1150	return cached_io(dso, machine, offset, data, size, out);
1151}
1152
1153/**
1154 * dso__data_read_offset - Read data from dso file offset
1155 * @dso: dso object
1156 * @machine: machine object
1157 * @offset: file offset
1158 * @data: buffer to store data
1159 * @size: size of the @data buffer
1160 *
1161 * External interface to read data from a dso file offset. Opens the
1162 * dso data file and uses cached_io() to get the data.
1163 */
1164ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1165			      u64 offset, u8 *data, ssize_t size)
1166{
1167	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1168		return -1;
1169
1170	return data_read_write_offset(dso, machine, offset, data, size, true);
1171}
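
/*
 * Illustrative sketch (not part of dso.c): reading the first bytes of
 * a dso through the cached-read interface. The ELF-magic check is an
 * assumption for illustration.
 */
static bool example_read_elf_magic(struct dso *dso, struct machine *machine)
{
	u8 buf[4];
	ssize_t n = dso__data_read_offset(dso, machine, /*offset=*/0,
					  buf, sizeof(buf));

	return n == (ssize_t)sizeof(buf) && memcmp(buf, "\177ELF", 4) == 0;
}
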
1172
1173/**
1174 * dso__data_read_addr - Read data from dso address
1175 * @dso: dso object
1176 * @machine: machine object
1177 * @addr: virtual memory address
1178 * @data: buffer to store data
1179 * @size: size of the @data buffer
1180 *
1181 * External interface to read data from dso address.
1182 */
1183ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1184			    struct machine *machine, u64 addr,
1185			    u8 *data, ssize_t size)
1186{
1187	u64 offset = map__map_ip(map, addr);
1188
1189	return dso__data_read_offset(dso, machine, offset, data, size);
1190}
1191
1192/**
1193 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1194 * @dso: dso object
1195 * @machine: machine object
1196 * @offset: file offset
1197 * @data: buffer to write
1198 * @size: size of the @data buffer
1199 *
1200 * Write into the dso file data cache, but do not change the file itself.
1201 */
1202ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1203				   u64 offset, const u8 *data_in, ssize_t size)
1204{
1205	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1206
1207	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1208		return -1;
1209
1210	return data_read_write_offset(dso, machine, offset, data, size, false);
1211}
1212
1213/**
1214 * dso__data_write_cache_addr - Write data to dso data cache at dso address
1215 * @dso: dso object
1216 * @machine: machine object
1217 * @addr: virtual memory address
1218 * @data: buffer to write
1219 * @size: size of the @data buffer
1220 *
1221 * External interface to write into the dso file data cache, but do not change
1222 * the file itself.
1223 */
1224ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1225				   struct machine *machine, u64 addr,
1226				   const u8 *data, ssize_t size)
1227{
1228	u64 offset = map__map_ip(map, addr);
1229
1230	return dso__data_write_cache_offs(dso, machine, offset, data, size);
1231}
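
/*
 * Illustrative sketch (not part of dso.c): overlaying bytes in the dso
 * data cache at a mapped address without touching the file on disk.
 * The patch contents (x86 NOPs) and the use case are assumptions for
 * illustration.
 */
static void example_patch_cached_text(struct dso *dso, struct map *map,
				      struct machine *machine, u64 addr)
{
	const u8 nops[4] = { 0x90, 0x90, 0x90, 0x90 };

	if (dso__data_write_cache_addr(dso, map, machine, addr,
				       nops, sizeof(nops)) != (ssize_t)sizeof(nops))
		pr_debug("cache write at %#llx failed\n",
			 (unsigned long long)addr);
}
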
1232
1233struct map *dso__new_map(const char *name)
1234{
1235	struct map *map = NULL;
1236	struct dso *dso = dso__new(name);
1237
1238	if (dso) {
1239		map = map__new2(0, dso);
1240		dso__put(dso);
1241	}
1242
1243	return map;
1244}
1245
1246struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1247				    const char *short_name, int dso_type)
1248{
1249	/*
1250	 * The kernel dso could be created by build_id processing.
1251	 */
1252	struct dso *dso = machine__findnew_dso(machine, name);
1253
1254	/*
1255	 * We need to run this in all cases, since during the build_id
1256	 * processing we had no idea this was the kernel dso.
1257	 */
1258	if (dso != NULL) {
1259		dso__set_short_name(dso, short_name, false);
1260		dso__set_kernel(dso, dso_type);
1261	}
1262
1263	return dso;
1264}
1265
1266static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
1267{
1268	struct dsos *dsos = dso__dsos(dso);
1269
1270	if (name == NULL)
1271		return;
1272
1273	if (dsos) {
1274		/*
1275		 * Hold the write lock: renaming the dso is not atomic, so it
1276		 * must not race with users that rely on the sorted dsos.
1277		 */
1278		down_write(&dsos->lock);
1279	}
1280
1281	if (dso__long_name_allocated(dso))
1282		free((char *)dso__long_name(dso));
1283
1284	RC_CHK_ACCESS(dso)->long_name = name;
1285	RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
1286	dso__set_long_name_allocated(dso, name_allocated);
1287
1288	if (dsos) {
1289		dsos->sorted = false;
1290		up_write(&dsos->lock);
1291	}
1292}
1293
1294static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
1295{
1296	if (a->maj > b->maj) return -1;
1297	if (a->maj < b->maj) return 1;
1298
1299	if (a->min > b->min) return -1;
1300	if (a->min < b->min) return 1;
1301
1302	if (a->ino > b->ino) return -1;
1303	if (a->ino < b->ino) return 1;
1304
1305	/*
1306	 * Synthesized MMAP events have zero ino_generation, avoid comparing
1307	 * them with MMAP events with actual ino_generation.
1308	 *
1309	 * I found it harmful because the mismatch resulted in a new
1310	 * dso that did not have a build ID whereas the original dso did have a
1311	 * build ID. The build ID was essential because the object was not found
1312	 * otherwise. - Adrian
1313	 */
1314	if (a->ino_generation && b->ino_generation) {
1315		if (a->ino_generation > b->ino_generation) return -1;
1316		if (a->ino_generation < b->ino_generation) return 1;
1317	}
1318
1319	return 0;
1320}
1321
1322bool dso_id__empty(const struct dso_id *id)
1323{
1324	if (!id)
1325		return true;
1326
1327	return !id->maj && !id->min && !id->ino && !id->ino_generation;
1328}
1329
1330void __dso__inject_id(struct dso *dso, const struct dso_id *id)
1331{
1332	struct dsos *dsos = dso__dsos(dso);
1333	struct dso_id *dso_id = dso__id(dso);
1334
1335	/* dsos write lock held by caller. */
1336
1337	dso_id->maj = id->maj;
1338	dso_id->min = id->min;
1339	dso_id->ino = id->ino;
1340	dso_id->ino_generation = id->ino_generation;
1341
1342	if (dsos)
1343		dsos->sorted = false;
1344}
1345
1346int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
1347{
1348	/*
1349	 * The second argument is always dso->id, so it is zeroed if not
1350	 * set. Assume that passing NULL for @a also means a zeroed id.
1351	 */
1352	if (dso_id__empty(a) || dso_id__empty(b))
1353		return 0;
1354
1355	return __dso_id__cmp(a, b);
1356}
1357
1358int dso__cmp_id(struct dso *a, struct dso *b)
1359{
1360	return __dso_id__cmp(dso__id(a), dso__id(b));
1361}
1362
1363void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1364{
1365	dso__set_long_name_id(dso, name, name_allocated);
1366}
1367
1368void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1369{
1370	struct dsos *dsos = dso__dsos(dso);
1371
1372	if (name == NULL)
1373		return;
1374
1375	if (dsos) {
1376		/*
1377		 * Hold the write lock: renaming the dso is not atomic, so it
1378		 * must not race with users that rely on the sorted dsos.
1379		 */
1380		down_write(&dsos->lock);
1381	}
1382	if (dso__short_name_allocated(dso))
1383		free((char *)dso__short_name(dso));
1384
1385	RC_CHK_ACCESS(dso)->short_name		  = name;
1386	RC_CHK_ACCESS(dso)->short_name_len	  = strlen(name);
1387	dso__set_short_name_allocated(dso, name_allocated);
1388
1389	if (dsos) {
1390		dsos->sorted = false;
1391		up_write(&dsos->lock);
1392	}
1393}
1394
1395int dso__name_len(const struct dso *dso)
1396{
1397	if (!dso)
1398		return strlen("[unknown]");
1399	if (verbose > 0)
1400		return dso__long_name_len(dso);
1401
1402	return dso__short_name_len(dso);
1403}
1404
1405bool dso__loaded(const struct dso *dso)
1406{
1407	return RC_CHK_ACCESS(dso)->loaded;
1408}
1409
1410bool dso__sorted_by_name(const struct dso *dso)
1411{
1412	return RC_CHK_ACCESS(dso)->sorted_by_name;
1413}
1414
1415void dso__set_sorted_by_name(struct dso *dso)
1416{
1417	RC_CHK_ACCESS(dso)->sorted_by_name = true;
1418}
1419
1420struct dso *dso__new_id(const char *name, const struct dso_id *id)
1421{
1422	RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
1423	struct dso *res;
1424	struct dso_data *data;
1425
1426	if (!dso)
1427		return NULL;
1428
1429	if (ADD_RC_CHK(res, dso)) {
1430		strcpy(dso->name, name);
1431		if (id)
1432			dso->id = *id;
1433		dso__set_long_name_id(res, dso->name, false);
1434		dso__set_short_name(res, dso->name, false);
1435		dso->symbols = RB_ROOT_CACHED;
1436		dso->symbol_names = NULL;
1437		dso->symbol_names_len = 0;
1438		dso->inlined_nodes = RB_ROOT_CACHED;
1439		dso->srclines = RB_ROOT_CACHED;
1440		dso->data_types = RB_ROOT;
1441		dso->global_vars = RB_ROOT;
1442		dso->data.fd = -1;
1443		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1444		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1445		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1446		dso->is_64_bit = (sizeof(void *) == 8);
1447		dso->loaded = 0;
1448		dso->rel = 0;
1449		dso->sorted_by_name = 0;
1450		dso->has_build_id = 0;
1451		dso->has_srcline = 1;
1452		dso->a2l_fails = 1;
1453		dso->kernel = DSO_SPACE__USER;
1454		dso->is_kmod = 0;
1455		dso->needs_swap = DSO_SWAP__UNSET;
1456		dso->comp = COMP_ID__NONE;
1457		mutex_init(&dso->lock);
1458		refcount_set(&dso->refcnt, 1);
1459		data = &dso->data;
1460		data->cache = RB_ROOT;
1461		data->fd = -1;
1462		data->status = DSO_DATA_STATUS_UNKNOWN;
1463		INIT_LIST_HEAD(&data->open_entry);
1464#ifdef REFCNT_CHECKING
1465		data->dso = NULL; /* Set when on the open_entry list. */
1466#endif
1467	}
1468	return res;
1469}
1470
1471struct dso *dso__new(const char *name)
1472{
1473	return dso__new_id(name, NULL);
1474}
1475
1476void dso__delete(struct dso *dso)
1477{
1478	if (dso__dsos(dso))
1479		pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));
1480
1481	/* free inlines first, as they reference symbols */
1482	inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
1483	srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
1484	symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
1485	RC_CHK_ACCESS(dso)->symbol_names_len = 0;
1486	zfree(&RC_CHK_ACCESS(dso)->symbol_names);
1487	annotated_data_type__tree_delete(dso__data_types(dso));
1488	global_var_type__tree_delete(dso__global_vars(dso));
1489
1490	if (RC_CHK_ACCESS(dso)->short_name_allocated) {
1491		zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
1492		RC_CHK_ACCESS(dso)->short_name_allocated = false;
1493	}
1494
1495	if (RC_CHK_ACCESS(dso)->long_name_allocated) {
1496		zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
1497		RC_CHK_ACCESS(dso)->long_name_allocated = false;
1498	}
1499
1500	dso__data_close(dso);
1501	auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
1502	dso_cache__free(dso);
1503	dso__free_a2l(dso);
1504	dso__free_symsrc_filename(dso);
1505	nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
1506	mutex_destroy(dso__lock(dso));
1507	RC_CHK_FREE(dso);
1508}
1509
1510struct dso *dso__get(struct dso *dso)
1511{
1512	struct dso *result;
1513
1514	if (RC_CHK_GET(result, dso))
1515		refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);
1516
1517	return result;
1518}
1519
1520void dso__put(struct dso *dso)
1521{
1522	if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
1523		dso__delete(dso);
1524	else
1525		RC_CHK_PUT(dso);
1526}
1527
1528void dso__set_build_id(struct dso *dso, struct build_id *bid)
1529{
1530	RC_CHK_ACCESS(dso)->bid = *bid;
1531	RC_CHK_ACCESS(dso)->has_build_id = 1;
1532}
1533
1534bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1535{
1536	const struct build_id *dso_bid = dso__bid_const(dso);
1537
1538	if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
1539		/*
1540		 * For backward compatibility, allow a build-id to have
1541		 * trailing zeros.
1542		 */
1543		return !memcmp(dso_bid->data, bid->data, bid->size) &&
1544			!memchr_inv(&dso_bid->data[bid->size], 0,
1545				    dso_bid->size - bid->size);
1546	}
1547
1548	return dso_bid->size == bid->size &&
1549	       memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
1550}
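
/*
 * Standalone sketch (not part of dso.c) of the trailing-zero tolerant
 * comparison above, using a plain loop in place of the tools-internal
 * memchr_inv(). Buildable with just libc; the padding rule mirrors the
 * special case for a longer dso build-id.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool build_id_equal_padded(const unsigned char *a, size_t a_size,
				  const unsigned char *b, size_t b_size)
{
	if (a_size > b_size) {
		/* Compare the common prefix, then require zero padding. */
		if (memcmp(a, b, b_size) != 0)
			return false;
		for (size_t i = b_size; i < a_size; i++)
			if (a[i] != 0)
				return false;
		return true;
	}
	return a_size == b_size && memcmp(a, b, a_size) == 0;
}
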
1551
1552void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1553{
1554	char path[PATH_MAX];
1555
1556	if (machine__is_default_guest(machine))
1557		return;
1558	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1559	if (sysfs__read_build_id(path, dso__bid(dso)) == 0)
1560		dso__set_has_build_id(dso);
1561}
1562
1563int dso__kernel_module_get_build_id(struct dso *dso,
1564				    const char *root_dir)
1565{
1566	char filename[PATH_MAX];
1567	/*
1568	 * kernel module short names are of the form "[module]" and
1569	 * we need just "module" here.
1570	 */
1571	const char *name = dso__short_name(dso) + 1;
1572
1573	snprintf(filename, sizeof(filename),
1574		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1575		 root_dir, (int)strlen(name) - 1, name);
1576
1577	if (sysfs__read_build_id(filename, dso__bid(dso)) == 0)
1578		dso__set_has_build_id(dso);
1579
1580	return 0;
1581}
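
/*
 * Worked example (illustration only): for a module dso with the short
 * name "[e1000e]" and an empty root_dir, the snprintf() above builds
 *
 *	/sys/module/e1000e/notes/.note.gnu.build-id
 *
 * i.e. the leading '[' is skipped via "name + 1" and the trailing ']'
 * is dropped by printing strlen(name) - 1 characters.
 */
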
1582
1583static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1584{
1585	char sbuild_id[SBUILD_ID_SIZE];
1586
1587	build_id__sprintf(dso__bid(dso), sbuild_id);
1588	return fprintf(fp, "%s", sbuild_id);
1589}
1590
1591size_t dso__fprintf(struct dso *dso, FILE *fp)
1592{
1593	struct rb_node *nd;
1594	size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));
1595
1596	if (dso__short_name(dso) != dso__long_name(dso))
1597		ret += fprintf(fp, "%s, ", dso__long_name(dso));
1598	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1599	ret += dso__fprintf_buildid(dso, fp);
1600	ret += fprintf(fp, ")\n");
1601	for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
1602		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1603		ret += symbol__fprintf(pos, fp);
1604	}
1605
1606	return ret;
1607}
1608
1609enum dso_type dso__type(struct dso *dso, struct machine *machine)
1610{
1611	int fd;
1612	enum dso_type type = DSO__TYPE_UNKNOWN;
1613
1614	fd = dso__data_get_fd(dso, machine);
1615	if (fd >= 0) {
1616		type = dso__type_fd(fd);
1617		dso__data_put_fd(dso);
1618	}
1619
1620	return type;
1621}
1622
1623int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1624{
1625	int idx, errnum = *dso__load_errno(dso);
1626	/*
1627	 * This must have the same ordering as enum dso_load_errno.
1628	 */
1629	static const char *dso_load__error_str[] = {
1630	"Internal tools/perf/ library error",
1631	"Invalid ELF file",
1632	"Can not read build id",
1633	"Mismatching build id",
1634	"Decompression failure",
1635	};
1636
1637	BUG_ON(buflen == 0);
1638
1639	if (errnum >= 0) {
1640		const char *err = str_error_r(errnum, buf, buflen);
1641
1642		if (err != buf)
1643			scnprintf(buf, buflen, "%s", err);
1644
1645		return 0;
1646	}
1647
1648	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1649		return -1;
1650
1651	idx = errnum - __DSO_LOAD_ERRNO__START;
1652	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1653	return 0;
1654}
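
/*
 * Illustrative sketch (not part of dso.c): reporting why a dso failed
 * to load, e.g. after symbol loading stored a dso_load_errno value.
 * The surrounding error-handling flow is an assumption for
 * illustration.
 */
static void example_report_load_error(struct dso *dso)
{
	char buf[128];

	if (dso__strerror_load(dso, buf, sizeof(buf)) == 0)
		pr_debug("failed to load %s: %s\n", dso__long_name(dso), buf);
}
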
1655
1656bool perf_pid_map_tid(const char *dso_name, int *tid)
1657{
1658	return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1;
1659}
1660
1661bool is_perf_pid_map_name(const char *dso_name)
1662{
1663	int tid;
1664
1665	return perf_pid_map_tid(dso_name, &tid);
1666}
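
/*
 * Standalone sketch (not part of dso.c) of the /tmp/perf-<pid>.map
 * convention parsed above. Buildable with just libc; the sample dso
 * names are illustrative.
 */
#include <stdio.h>

int main(void)
{
	int tid;

	/* Matches: tid becomes 1234. */
	if (sscanf("/tmp/perf-1234.map", "/tmp/perf-%d.map", &tid) == 1)
		printf("JIT map for tid %d\n", tid);

	/* Does not match an ordinary dso name. */
	if (sscanf("/usr/lib64/libc.so.6", "/tmp/perf-%d.map", &tid) != 1)
		printf("not a perf pid map\n");

	return 0;
}
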