   1// SPDX-License-Identifier: GPL-2.0
   2#include <dirent.h>
   3#include <errno.h>
   4#include <inttypes.h>
   5#include <regex.h>
   6#include <stdlib.h>
   7#include "callchain.h"
   8#include "debug.h"
   9#include "dso.h"
  10#include "env.h"
  11#include "event.h"
  12#include "evsel.h"
  13#include "hist.h"
  14#include "machine.h"
  15#include "map.h"
  16#include "map_symbol.h"
  17#include "branch.h"
  18#include "mem-events.h"
  19#include "path.h"
  20#include "srcline.h"
  21#include "symbol.h"
  22#include "sort.h"
  23#include "strlist.h"
  24#include "target.h"
  25#include "thread.h"
  26#include "util.h"
  27#include "vdso.h"
  28#include <stdbool.h>
  29#include <sys/types.h>
  30#include <sys/stat.h>
  31#include <unistd.h>
  32#include "unwind.h"
  33#include "linux/hash.h"
  34#include "asm/bug.h"
  35#include "bpf-event.h"
  36#include <internal/lib.h> // page_size
  37#include "cgroup.h"
  38#include "arm64-frame-pointer-unwind-support.h"
  39
  40#include <linux/ctype.h>
  41#include <symbol/kallsyms.h>
  42#include <linux/mman.h>
  43#include <linux/string.h>
  44#include <linux/zalloc.h>
  45
  46static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
  47				     struct thread *th, bool lock);
  48
  49static struct dso *machine__kernel_dso(struct machine *machine)
  50{
  51	return map__dso(machine->vmlinux_map);
  52}
  53
  54static void dsos__init(struct dsos *dsos)
  55{
  56	INIT_LIST_HEAD(&dsos->head);
  57	dsos->root = RB_ROOT;
  58	init_rwsem(&dsos->lock);
  59}
  60
  61static void machine__threads_init(struct machine *machine)
  62{
  63	int i;
  64
  65	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
  66		struct threads *threads = &machine->threads[i];
  67		threads->entries = RB_ROOT_CACHED;
  68		init_rwsem(&threads->lock);
  69		threads->nr = 0;
  70		threads->last_match = NULL;
  71	}
  72}
  73
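/*
 * rb_find() comparator: the key is a pid_t tid, matched against the tid of
 * the thread hanging off each thread_rb_node.
 */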
  74static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
  75{
  76	int to_find = (int) *((pid_t *)key);
  77
  78	return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
  79}
  80
  81static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
  82						   struct rb_root *tree)
  83{
  84	pid_t to_find = thread__tid(th);
  85	struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
  86
  87	return rb_entry(nd, struct thread_rb_node, rb_node);
  88}
  89
  90static int machine__set_mmap_name(struct machine *machine)
  91{
  92	if (machine__is_host(machine))
  93		machine->mmap_name = strdup("[kernel.kallsyms]");
  94	else if (machine__is_default_guest(machine))
  95		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
  96	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
  97			  machine->pid) < 0)
  98		machine->mmap_name = NULL;
  99
 100	return machine->mmap_name ? 0 : -ENOMEM;
 101}
 102
 103static void thread__set_guest_comm(struct thread *thread, pid_t pid)
 104{
 105	char comm[64];
 106
 107	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
 108	thread__set_comm(thread, comm, 0);
 109}
 110
 111int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 112{
 113	int err = -ENOMEM;
 114
 115	memset(machine, 0, sizeof(*machine));
 116	machine->kmaps = maps__new(machine);
 117	if (machine->kmaps == NULL)
 118		return -ENOMEM;
 119
 120	RB_CLEAR_NODE(&machine->rb_node);
 121	dsos__init(&machine->dsos);
 122
 123	machine__threads_init(machine);
 124
 125	machine->vdso_info = NULL;
 126	machine->env = NULL;
 127
 128	machine->pid = pid;
 129
 130	machine->id_hdr_size = 0;
 131	machine->kptr_restrict_warned = false;
 132	machine->comm_exec = false;
 133	machine->kernel_start = 0;
 134	machine->vmlinux_map = NULL;
 135
 136	machine->root_dir = strdup(root_dir);
 137	if (machine->root_dir == NULL)
 138		goto out;
 139
 140	if (machine__set_mmap_name(machine))
 141		goto out;
 142
 143	if (pid != HOST_KERNEL_ID) {
 144		struct thread *thread = machine__findnew_thread(machine, -1,
 145								pid);
 146
 147		if (thread == NULL)
 148			goto out;
 149
 150		thread__set_guest_comm(thread, pid);
 151		thread__put(thread);
 152	}
 153
 154	machine->current_tid = NULL;
 155	err = 0;
 156
 157out:
 158	if (err) {
 159		zfree(&machine->kmaps);
 160		zfree(&machine->root_dir);
 161		zfree(&machine->mmap_name);
 162	}
  163	return err;
 164}
 165
 166struct machine *machine__new_host(void)
 167{
 168	struct machine *machine = malloc(sizeof(*machine));
 169
 170	if (machine != NULL) {
 171		machine__init(machine, "", HOST_KERNEL_ID);
 172
 173		if (machine__create_kernel_maps(machine) < 0)
 174			goto out_delete;
 175	}
 176
 177	return machine;
 178out_delete:
 179	free(machine);
 180	return NULL;
 181}
 182
 183struct machine *machine__new_kallsyms(void)
 184{
 185	struct machine *machine = machine__new_host();
 186	/*
 187	 * FIXME:
 188	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
 189	 *    ask for not using the kcore parsing code, once this one is fixed
 190	 *    to create a map per module.
 191	 */
 192	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
 193		machine__delete(machine);
 194		machine = NULL;
 195	}
 196
 197	return machine;
 198}
 199
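/*
 * Empty the machine's dso list, detaching each dso from the rb tree and
 * dropping the list's reference; dsos still referenced elsewhere stay alive
 * until their last put.
 */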
 200static void dsos__purge(struct dsos *dsos)
 201{
 202	struct dso *pos, *n;
 203
 204	down_write(&dsos->lock);
 205
 206	list_for_each_entry_safe(pos, n, &dsos->head, node) {
 207		RB_CLEAR_NODE(&pos->rb_node);
 208		pos->root = NULL;
 209		list_del_init(&pos->node);
 210		dso__put(pos);
 211	}
 212
 213	up_write(&dsos->lock);
 214}
 215
 216static void dsos__exit(struct dsos *dsos)
 217{
 218	dsos__purge(dsos);
 219	exit_rwsem(&dsos->lock);
 220}
 221
 222void machine__delete_threads(struct machine *machine)
 223{
 224	struct rb_node *nd;
 225	int i;
 226
 227	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 228		struct threads *threads = &machine->threads[i];
 229		down_write(&threads->lock);
 230		nd = rb_first_cached(&threads->entries);
 231		while (nd) {
 232			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
 233
 234			nd = rb_next(nd);
 235			__machine__remove_thread(machine, trb, trb->thread, false);
 236		}
 237		up_write(&threads->lock);
 238	}
 239}
 240
 241void machine__exit(struct machine *machine)
 242{
 243	int i;
 244
 245	if (machine == NULL)
 246		return;
 247
 248	machine__destroy_kernel_maps(machine);
 249	maps__zput(machine->kmaps);
 250	dsos__exit(&machine->dsos);
 251	machine__exit_vdso(machine);
 252	zfree(&machine->root_dir);
 253	zfree(&machine->mmap_name);
 254	zfree(&machine->current_tid);
 255	zfree(&machine->kallsyms_filename);
 256
 257	machine__delete_threads(machine);
 258	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 259		struct threads *threads = &machine->threads[i];
 260
 261		exit_rwsem(&threads->lock);
 262	}
 263}
 264
 265void machine__delete(struct machine *machine)
 266{
 267	if (machine) {
 268		machine__exit(machine);
 269		free(machine);
 270	}
 271}
 272
 273void machines__init(struct machines *machines)
 274{
 275	machine__init(&machines->host, "", HOST_KERNEL_ID);
 276	machines->guests = RB_ROOT_CACHED;
 277}
 278
 279void machines__exit(struct machines *machines)
 280{
 281	machine__exit(&machines->host);
 282	/* XXX exit guest */
 283}
 284
 285struct machine *machines__add(struct machines *machines, pid_t pid,
 286			      const char *root_dir)
 287{
 288	struct rb_node **p = &machines->guests.rb_root.rb_node;
 289	struct rb_node *parent = NULL;
 290	struct machine *pos, *machine = malloc(sizeof(*machine));
 291	bool leftmost = true;
 292
 293	if (machine == NULL)
 294		return NULL;
 295
 296	if (machine__init(machine, root_dir, pid) != 0) {
 297		free(machine);
 298		return NULL;
 299	}
 300
 301	while (*p != NULL) {
 302		parent = *p;
 303		pos = rb_entry(parent, struct machine, rb_node);
 304		if (pid < pos->pid)
 305			p = &(*p)->rb_left;
 306		else {
 307			p = &(*p)->rb_right;
 308			leftmost = false;
 309		}
 310	}
 311
 312	rb_link_node(&machine->rb_node, parent, p);
 313	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
 314
 315	machine->machines = machines;
 316
 317	return machine;
 318}
 319
 320void machines__set_comm_exec(struct machines *machines, bool comm_exec)
 321{
 322	struct rb_node *nd;
 323
 324	machines->host.comm_exec = comm_exec;
 325
 326	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 327		struct machine *machine = rb_entry(nd, struct machine, rb_node);
 328
 329		machine->comm_exec = comm_exec;
 330	}
 331}
 332
 333struct machine *machines__find(struct machines *machines, pid_t pid)
 334{
 335	struct rb_node **p = &machines->guests.rb_root.rb_node;
 336	struct rb_node *parent = NULL;
 337	struct machine *machine;
 338	struct machine *default_machine = NULL;
 339
 340	if (pid == HOST_KERNEL_ID)
 341		return &machines->host;
 342
 343	while (*p != NULL) {
 344		parent = *p;
 345		machine = rb_entry(parent, struct machine, rb_node);
 346		if (pid < machine->pid)
 347			p = &(*p)->rb_left;
 348		else if (pid > machine->pid)
 349			p = &(*p)->rb_right;
 350		else
 351			return machine;
 352		if (!machine->pid)
 353			default_machine = machine;
 354	}
 355
 356	return default_machine;
 357}
 358
 359struct machine *machines__findnew(struct machines *machines, pid_t pid)
 360{
 361	char path[PATH_MAX];
 362	const char *root_dir = "";
 363	struct machine *machine = machines__find(machines, pid);
 364
 365	if (machine && (machine->pid == pid))
 366		goto out;
 367
 368	if ((pid != HOST_KERNEL_ID) &&
 369	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
 370	    (symbol_conf.guestmount)) {
 371		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
 372		if (access(path, R_OK)) {
 373			static struct strlist *seen;
 374
 375			if (!seen)
 376				seen = strlist__new(NULL, NULL);
 377
 378			if (!strlist__has_entry(seen, path)) {
 379				pr_err("Can't access file %s\n", path);
 380				strlist__add(seen, path);
 381			}
 382			machine = NULL;
 383			goto out;
 384		}
 385		root_dir = path;
 386	}
 387
 388	machine = machines__add(machines, pid, root_dir);
 389out:
 390	return machine;
 391}
 392
 393struct machine *machines__find_guest(struct machines *machines, pid_t pid)
 394{
 395	struct machine *machine = machines__find(machines, pid);
 396
 397	if (!machine)
 398		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
 399	return machine;
 400}
 401
 402/*
 403 * A common case for KVM test programs is that the test program acts as the
 404 * hypervisor, creating, running and destroying the virtual machine, and
 405 * providing the guest object code from its own object code. In this case,
 406 * the VM is not running an OS, but only the functions loaded into it by the
 407 * hypervisor test program, and conveniently, loaded at the same virtual
 408 * addresses.
 409 *
 410 * Normally to resolve addresses, MMAP events are needed to map addresses
 411 * back to the object code and debug symbols for that object code.
 412 *
 413 * Currently, there is no way to get such mapping information from guests
 414 * but, in the scenario described above, the guest has the same mappings
 415 * as the hypervisor, so support for that scenario can be achieved.
 416 *
 417 * To support that, copy the host thread's maps to the guest thread's maps.
 418 * Note, we do not discover the guest until we encounter a guest event,
 419 * which works well because it is not until then that we know that the host
 420 * thread's maps have been set up.
 421 *
 422 * This function returns the guest thread. Apart from keeping the data
 423 * structures sane, using a thread belonging to the guest machine, instead
 424 * of the host thread, allows it to have its own comm (refer
 425 * thread__set_guest_comm()).
 426 */
 427static struct thread *findnew_guest_code(struct machine *machine,
 428					 struct machine *host_machine,
 429					 pid_t pid)
 430{
 431	struct thread *host_thread;
 432	struct thread *thread;
 433	int err;
 434
 435	if (!machine)
 436		return NULL;
 437
 438	thread = machine__findnew_thread(machine, -1, pid);
 439	if (!thread)
 440		return NULL;
 441
 442	/* Assume maps are set up if there are any */
 443	if (maps__nr_maps(thread__maps(thread)))
 444		return thread;
 445
 446	host_thread = machine__find_thread(host_machine, -1, pid);
 447	if (!host_thread)
 448		goto out_err;
 449
 450	thread__set_guest_comm(thread, pid);
 451
 452	/*
 453	 * Guest code can be found in hypervisor process at the same address
 454	 * so copy host maps.
 455	 */
 456	err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
 457	thread__put(host_thread);
 458	if (err)
 459		goto out_err;
 460
 461	return thread;
 462
 463out_err:
 464	thread__zput(thread);
 465	return NULL;
 466}
 467
 468struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
 469{
 470	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
 471	struct machine *machine = machines__findnew(machines, pid);
 472
 473	return findnew_guest_code(machine, host_machine, pid);
 474}
 475
 476struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
 477{
 478	struct machines *machines = machine->machines;
 479	struct machine *host_machine;
 480
 481	if (!machines)
 482		return NULL;
 483
 484	host_machine = machines__find(machines, HOST_KERNEL_ID);
 485
 486	return findnew_guest_code(machine, host_machine, pid);
 487}
 488
 489void machines__process_guests(struct machines *machines,
 490			      machine__process_t process, void *data)
 491{
 492	struct rb_node *nd;
 493
 494	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 495		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 496		process(pos, data);
 497	}
 498}
 499
 500void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 501{
 502	struct rb_node *node;
 503	struct machine *machine;
 504
 505	machines->host.id_hdr_size = id_hdr_size;
 506
 507	for (node = rb_first_cached(&machines->guests); node;
 508	     node = rb_next(node)) {
 509		machine = rb_entry(node, struct machine, rb_node);
 510		machine->id_hdr_size = id_hdr_size;
 511	}
 512
 513	return;
 514}
 515
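/*
 * Fill in a thread's pid once it becomes known: if the thread is not the
 * group leader, find/create the leader and share its maps, discarding any
 * maps the thread wrongly accumulated while its pid was unknown.
 */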
 516static void machine__update_thread_pid(struct machine *machine,
 517				       struct thread *th, pid_t pid)
 518{
 519	struct thread *leader;
 520
 521	if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
 522		return;
 523
 524	thread__set_pid(th, pid);
 525
 526	if (thread__pid(th) == thread__tid(th))
 527		return;
 528
 529	leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
 530	if (!leader)
 531		goto out_err;
 532
 533	if (!thread__maps(leader))
 534		thread__set_maps(leader, maps__new(machine));
 535
 536	if (!thread__maps(leader))
 537		goto out_err;
 538
 539	if (thread__maps(th) == thread__maps(leader))
 540		goto out_put;
 541
 542	if (thread__maps(th)) {
 543		/*
 544		 * Maps are created from MMAP events which provide the pid and
 545		 * tid.  Consequently there never should be any maps on a thread
 546		 * with an unknown pid.  Just print an error if there are.
 547		 */
 548		if (!maps__empty(thread__maps(th)))
 549			pr_err("Discarding thread maps for %d:%d\n",
 550				thread__pid(th), thread__tid(th));
 551		maps__put(thread__maps(th));
 552	}
 553
 554	thread__set_maps(th, maps__get(thread__maps(leader)));
 555out_put:
 556	thread__put(leader);
 557	return;
 558out_err:
 559	pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
 560	goto out_put;
 561}
 562
 563/*
 564 * Front-end cache - TID lookups come in blocks,
  565 * so most of the time we don't have to look up
 566 * the full rbtree:
 567 */
 568static struct thread*
 569__threads__get_last_match(struct threads *threads, struct machine *machine,
 570			  int pid, int tid)
 571{
 572	struct thread *th;
 573
 574	th = threads->last_match;
 575	if (th != NULL) {
 576		if (thread__tid(th) == tid) {
 577			machine__update_thread_pid(machine, th, pid);
 578			return thread__get(th);
 579		}
 580		thread__put(threads->last_match);
 581		threads->last_match = NULL;
 582	}
 583
 584	return NULL;
 585}
 586
 587static struct thread*
 588threads__get_last_match(struct threads *threads, struct machine *machine,
 589			int pid, int tid)
 590{
 591	struct thread *th = NULL;
 592
 593	if (perf_singlethreaded)
 594		th = __threads__get_last_match(threads, machine, pid, tid);
 595
 596	return th;
 597}
 598
 599static void
 600__threads__set_last_match(struct threads *threads, struct thread *th)
 601{
 602	thread__put(threads->last_match);
 603	threads->last_match = thread__get(th);
 604}
 605
 606static void
 607threads__set_last_match(struct threads *threads, struct thread *th)
 608{
 609	if (perf_singlethreaded)
 610		__threads__set_last_match(threads, th);
 611}
 612
 613/*
 614 * Caller must eventually drop thread->refcnt returned with a successful
 615 * lookup/new thread inserted.
 616 */
 617static struct thread *____machine__findnew_thread(struct machine *machine,
 618						  struct threads *threads,
 619						  pid_t pid, pid_t tid,
 620						  bool create)
 621{
 622	struct rb_node **p = &threads->entries.rb_root.rb_node;
 623	struct rb_node *parent = NULL;
 624	struct thread *th;
 625	struct thread_rb_node *nd;
 626	bool leftmost = true;
 627
 628	th = threads__get_last_match(threads, machine, pid, tid);
 629	if (th)
 630		return th;
 631
 632	while (*p != NULL) {
 633		parent = *p;
 634		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
 635
 636		if (thread__tid(th) == tid) {
 637			threads__set_last_match(threads, th);
 638			machine__update_thread_pid(machine, th, pid);
 639			return thread__get(th);
 640		}
 641
 642		if (tid < thread__tid(th))
 643			p = &(*p)->rb_left;
 644		else {
 645			p = &(*p)->rb_right;
 646			leftmost = false;
 647		}
 648	}
 649
 650	if (!create)
 651		return NULL;
 652
 653	th = thread__new(pid, tid);
 654	if (th == NULL)
 655		return NULL;
 656
 657	nd = malloc(sizeof(*nd));
 658	if (nd == NULL) {
 659		thread__put(th);
 660		return NULL;
 661	}
 662	nd->thread = th;
 663
 664	rb_link_node(&nd->rb_node, parent, p);
 665	rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
 666	/*
 667	 * We have to initialize maps separately after rb tree is updated.
 668	 *
 669	 * The reason is that we call machine__findnew_thread within
  670	 * thread__init_maps to find the thread leader and that would screw up
 671	 * the rb tree.
 672	 */
 673	if (thread__init_maps(th, machine)) {
 674		pr_err("Thread init failed thread %d\n", pid);
 675		rb_erase_cached(&nd->rb_node, &threads->entries);
 676		RB_CLEAR_NODE(&nd->rb_node);
 677		free(nd);
 678		thread__put(th);
 679		return NULL;
 680	}
 681	/*
 682	 * It is now in the rbtree, get a ref
 683	 */
 684	threads__set_last_match(threads, th);
 685	++threads->nr;
 686
 687	return thread__get(th);
 688}
 689
 690struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
 691{
 692	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
 693}
 694
 695struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
 696				       pid_t tid)
 697{
 698	struct threads *threads = machine__threads(machine, tid);
 699	struct thread *th;
 700
 701	down_write(&threads->lock);
 702	th = __machine__findnew_thread(machine, pid, tid);
 703	up_write(&threads->lock);
 704	return th;
 705}
 706
 707struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 708				    pid_t tid)
 709{
 710	struct threads *threads = machine__threads(machine, tid);
 711	struct thread *th;
 712
 713	down_read(&threads->lock);
 714	th =  ____machine__findnew_thread(machine, threads, pid, tid, false);
 715	up_read(&threads->lock);
 716	return th;
 717}
 718
 719/*
 720 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 721 * So here a single thread is created for that, but actually there is a separate
 722 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 723 * is only 1. That causes problems for some tools, requiring workarounds. For
 724 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 725 */
 726struct thread *machine__idle_thread(struct machine *machine)
 727{
 728	struct thread *thread = machine__findnew_thread(machine, 0, 0);
 729
 730	if (!thread || thread__set_comm(thread, "swapper", 0) ||
 731	    thread__set_namespaces(thread, 0, NULL))
 732		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
 733
 734	return thread;
 735}
 736
 737struct comm *machine__thread_exec_comm(struct machine *machine,
 738				       struct thread *thread)
 739{
 740	if (machine->comm_exec)
 741		return thread__exec_comm(thread);
 742	else
 743		return thread__comm(thread);
 744}
 745
 746int machine__process_comm_event(struct machine *machine, union perf_event *event,
 747				struct perf_sample *sample)
 748{
 749	struct thread *thread = machine__findnew_thread(machine,
 750							event->comm.pid,
 751							event->comm.tid);
 752	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
 753	int err = 0;
 754
 755	if (exec)
 756		machine->comm_exec = true;
 757
 758	if (dump_trace)
 759		perf_event__fprintf_comm(event, stdout);
 760
 761	if (thread == NULL ||
 762	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
 763		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 764		err = -1;
 765	}
 766
 767	thread__put(thread);
 768
 769	return err;
 770}
 771
 772int machine__process_namespaces_event(struct machine *machine __maybe_unused,
 773				      union perf_event *event,
 774				      struct perf_sample *sample __maybe_unused)
 775{
 776	struct thread *thread = machine__findnew_thread(machine,
 777							event->namespaces.pid,
 778							event->namespaces.tid);
 779	int err = 0;
 780
 781	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
 782		  "\nWARNING: kernel seems to support more namespaces than perf"
 783		  " tool.\nTry updating the perf tool..\n\n");
 784
 785	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
 786		  "\nWARNING: perf tool seems to support more namespaces than"
 787		  " the kernel.\nTry updating the kernel..\n\n");
 788
 789	if (dump_trace)
 790		perf_event__fprintf_namespaces(event, stdout);
 791
 792	if (thread == NULL ||
 793	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
 794		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
 795		err = -1;
 796	}
 797
 798	thread__put(thread);
 799
 800	return err;
 801}
 802
 803int machine__process_cgroup_event(struct machine *machine,
 804				  union perf_event *event,
 805				  struct perf_sample *sample __maybe_unused)
 806{
 807	struct cgroup *cgrp;
 808
 809	if (dump_trace)
 810		perf_event__fprintf_cgroup(event, stdout);
 811
 812	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
 813	if (cgrp == NULL)
 814		return -ENOMEM;
 815
 816	return 0;
 817}
 818
 819int machine__process_lost_event(struct machine *machine __maybe_unused,
 820				union perf_event *event, struct perf_sample *sample __maybe_unused)
 821{
 822	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
 823		    event->lost.id, event->lost.lost);
 824	return 0;
 825}
 826
 827int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
 828					union perf_event *event, struct perf_sample *sample)
 829{
 830	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
 831		    sample->id, event->lost_samples.lost);
 832	return 0;
 833}
 834
 835static struct dso *machine__findnew_module_dso(struct machine *machine,
 836					       struct kmod_path *m,
 837					       const char *filename)
 838{
 839	struct dso *dso;
 840
 841	down_write(&machine->dsos.lock);
 842
 843	dso = __dsos__find(&machine->dsos, m->name, true);
 844	if (!dso) {
 845		dso = __dsos__addnew(&machine->dsos, m->name);
 846		if (dso == NULL)
 847			goto out_unlock;
 848
 849		dso__set_module_info(dso, m, machine);
 850		dso__set_long_name(dso, strdup(filename), true);
 851		dso->kernel = DSO_SPACE__KERNEL;
 852	}
 853
 854	dso__get(dso);
 855out_unlock:
 856	up_write(&machine->dsos.lock);
 857	return dso;
 858}
 859
 860int machine__process_aux_event(struct machine *machine __maybe_unused,
 861			       union perf_event *event)
 862{
 863	if (dump_trace)
 864		perf_event__fprintf_aux(event, stdout);
 865	return 0;
 866}
 867
 868int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
 869					union perf_event *event)
 870{
 871	if (dump_trace)
 872		perf_event__fprintf_itrace_start(event, stdout);
 873	return 0;
 874}
 875
 876int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
 877					    union perf_event *event)
 878{
 879	if (dump_trace)
 880		perf_event__fprintf_aux_output_hw_id(event, stdout);
 881	return 0;
 882}
 883
 884int machine__process_switch_event(struct machine *machine __maybe_unused,
 885				  union perf_event *event)
 886{
 887	if (dump_trace)
 888		perf_event__fprintf_switch(event, stdout);
 889	return 0;
 890}
 891
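/*
 * PERF_RECORD_KSYMBOL register: if no kernel map covers the address (e.g. a
 * BPF program or other dynamically generated kernel text), create a dso and
 * map for it, then insert a symbol spanning the ksymbol range.
 */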
 892static int machine__process_ksymbol_register(struct machine *machine,
 893					     union perf_event *event,
 894					     struct perf_sample *sample __maybe_unused)
 895{
 896	struct symbol *sym;
 897	struct dso *dso;
 898	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
 899	bool put_map = false;
 900	int err = 0;
 901
 902	if (!map) {
 903		dso = dso__new(event->ksymbol.name);
 904
 905		if (!dso) {
 906			err = -ENOMEM;
 907			goto out;
 908		}
 909		dso->kernel = DSO_SPACE__KERNEL;
 910		map = map__new2(0, dso);
 911		dso__put(dso);
 912		if (!map) {
 913			err = -ENOMEM;
 914			goto out;
 915		}
 916		/*
 917		 * The inserted map has a get on it, we need to put to release
 918		 * the reference count here, but do it after all accesses are
 919		 * done.
 920		 */
 921		put_map = true;
 922		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
 923			dso->binary_type = DSO_BINARY_TYPE__OOL;
 924			dso->data.file_size = event->ksymbol.len;
 925			dso__set_loaded(dso);
 926		}
 927
 928		map__set_start(map, event->ksymbol.addr);
 929		map__set_end(map, map__start(map) + event->ksymbol.len);
 930		err = maps__insert(machine__kernel_maps(machine), map);
 931		if (err) {
 932			err = -ENOMEM;
 933			goto out;
 934		}
 935
 936		dso__set_loaded(dso);
 937
 938		if (is_bpf_image(event->ksymbol.name)) {
 939			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
 940			dso__set_long_name(dso, "", false);
 941		}
 942	} else {
 943		dso = map__dso(map);
 944	}
 945
 946	sym = symbol__new(map__map_ip(map, map__start(map)),
 947			  event->ksymbol.len,
 948			  0, 0, event->ksymbol.name);
 949	if (!sym) {
 950		err = -ENOMEM;
 951		goto out;
 952	}
 953	dso__insert_symbol(dso, sym);
 954out:
 955	if (put_map)
 956		map__put(map);
 957	return err;
 958}
 959
 960static int machine__process_ksymbol_unregister(struct machine *machine,
 961					       union perf_event *event,
 962					       struct perf_sample *sample __maybe_unused)
 963{
 964	struct symbol *sym;
 965	struct map *map;
 966
 967	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
 968	if (!map)
 969		return 0;
 970
 971	if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
 972		maps__remove(machine__kernel_maps(machine), map);
 973	else {
 974		struct dso *dso = map__dso(map);
 975
 976		sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
 977		if (sym)
 978			dso__delete_symbol(dso, sym);
 979	}
 980
 981	return 0;
 982}
 983
 984int machine__process_ksymbol(struct machine *machine __maybe_unused,
 985			     union perf_event *event,
 986			     struct perf_sample *sample)
 987{
 988	if (dump_trace)
 989		perf_event__fprintf_ksymbol(event, stdout);
 990
 991	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
 992		return machine__process_ksymbol_unregister(machine, event,
 993							   sample);
 994	return machine__process_ksymbol_register(machine, event, sample);
 995}
 996
 997int machine__process_text_poke(struct machine *machine, union perf_event *event,
 998			       struct perf_sample *sample __maybe_unused)
 999{
1000	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
1001	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1002	struct dso *dso = map ? map__dso(map) : NULL;
1003
1004	if (dump_trace)
1005		perf_event__fprintf_text_poke(event, machine, stdout);
1006
1007	if (!event->text_poke.new_len)
1008		return 0;
1009
1010	if (cpumode != PERF_RECORD_MISC_KERNEL) {
1011		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
1012		return 0;
1013	}
1014
1015	if (dso) {
1016		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
1017		int ret;
1018
1019		/*
1020		 * Kernel maps might be changed when loading symbols so loading
1021		 * must be done prior to using kernel maps.
1022		 */
1023		map__load(map);
1024		ret = dso__data_write_cache_addr(dso, map, machine,
1025						 event->text_poke.addr,
1026						 new_bytes,
1027						 event->text_poke.new_len);
1028		if (ret != event->text_poke.new_len)
1029			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
1030				 event->text_poke.addr);
1031	} else {
1032		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
1033			 event->text_poke.addr);
1034	}
1035
1036	return 0;
1037}
1038
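/*
 * Parse the module path, find or create its dso and insert a new map for it,
 * starting at 'start', into the machine's kernel maps.
 */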
1039static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
1040					      const char *filename)
1041{
1042	struct map *map = NULL;
1043	struct kmod_path m;
1044	struct dso *dso;
1045	int err;
1046
1047	if (kmod_path__parse_name(&m, filename))
1048		return NULL;
1049
1050	dso = machine__findnew_module_dso(machine, &m, filename);
1051	if (dso == NULL)
1052		goto out;
1053
1054	map = map__new2(start, dso);
1055	if (map == NULL)
1056		goto out;
1057
1058	err = maps__insert(machine__kernel_maps(machine), map);
1059	/* If maps__insert failed, return NULL. */
1060	if (err) {
1061		map__put(map);
1062		map = NULL;
1063	}
1064out:
1065	/* put the dso here, corresponding to  machine__findnew_module_dso */
1066	dso__put(dso);
1067	zfree(&m.name);
1068	return map;
1069}
1070
1071size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
1072{
1073	struct rb_node *nd;
1074	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
1075
1076	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1077		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1078		ret += __dsos__fprintf(&pos->dsos.head, fp);
1079	}
1080
1081	return ret;
1082}
1083
1084size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
1085				     bool (skip)(struct dso *dso, int parm), int parm)
1086{
1087	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
1088}
1089
1090size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
1091				     bool (skip)(struct dso *dso, int parm), int parm)
1092{
1093	struct rb_node *nd;
1094	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
1095
1096	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1097		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1098		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
1099	}
1100	return ret;
1101}
1102
1103size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1104{
1105	int i;
1106	size_t printed = 0;
1107	struct dso *kdso = machine__kernel_dso(machine);
1108
1109	if (kdso->has_build_id) {
1110		char filename[PATH_MAX];
1111		if (dso__build_id_filename(kdso, filename, sizeof(filename),
1112					   false))
1113			printed += fprintf(fp, "[0] %s\n", filename);
1114	}
1115
1116	for (i = 0; i < vmlinux_path__nr_entries; ++i)
1117		printed += fprintf(fp, "[%d] %s\n",
1118				   i + kdso->has_build_id, vmlinux_path[i]);
1119
1120	return printed;
1121}
1122
1123size_t machine__fprintf(struct machine *machine, FILE *fp)
1124{
1125	struct rb_node *nd;
1126	size_t ret;
1127	int i;
1128
1129	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
1130		struct threads *threads = &machine->threads[i];
1131
1132		down_read(&threads->lock);
1133
1134		ret = fprintf(fp, "Threads: %u\n", threads->nr);
1135
1136		for (nd = rb_first_cached(&threads->entries); nd;
1137		     nd = rb_next(nd)) {
1138			struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
1139
1140			ret += thread__fprintf(pos, fp);
1141		}
1142
1143		up_read(&threads->lock);
1144	}
1145	return ret;
1146}
1147
1148static struct dso *machine__get_kernel(struct machine *machine)
1149{
1150	const char *vmlinux_name = machine->mmap_name;
1151	struct dso *kernel;
1152
1153	if (machine__is_host(machine)) {
1154		if (symbol_conf.vmlinux_name)
1155			vmlinux_name = symbol_conf.vmlinux_name;
1156
1157		kernel = machine__findnew_kernel(machine, vmlinux_name,
1158						 "[kernel]", DSO_SPACE__KERNEL);
1159	} else {
1160		if (symbol_conf.default_guest_vmlinux_name)
1161			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1162
1163		kernel = machine__findnew_kernel(machine, vmlinux_name,
1164						 "[guest.kernel]",
1165						 DSO_SPACE__KERNEL_GUEST);
1166	}
1167
1168	if (kernel != NULL && (!kernel->has_build_id))
1169		dso__read_running_kernel_build_id(kernel, machine);
1170
1171	return kernel;
1172}
1173
1174void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1175				    size_t bufsz)
1176{
1177	if (machine__is_default_guest(machine))
1178		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1179	else
1180		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1181}
1182
1183const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1184
1185/* Figure out the start address of kernel map from /proc/kallsyms.
1186 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1187 * symbol_name if it's not that important.
1188 */
1189static int machine__get_running_kernel_start(struct machine *machine,
1190					     const char **symbol_name,
1191					     u64 *start, u64 *end)
1192{
1193	char filename[PATH_MAX];
1194	int i, err = -1;
1195	const char *name;
1196	u64 addr = 0;
1197
1198	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1199
1200	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1201		return 0;
1202
1203	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1204		err = kallsyms__get_function_start(filename, name, &addr);
1205		if (!err)
1206			break;
1207	}
1208
1209	if (err)
1210		return -1;
1211
1212	if (symbol_name)
1213		*symbol_name = name;
1214
1215	*start = addr;
1216
1217	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1218	if (err)
1219		err = kallsyms__get_function_start(filename, "_etext", &addr);
1220	if (!err)
1221		*end = addr;
1222
1223	return 0;
1224}
1225
1226int machine__create_extra_kernel_map(struct machine *machine,
1227				     struct dso *kernel,
1228				     struct extra_kernel_map *xm)
1229{
1230	struct kmap *kmap;
1231	struct map *map;
1232	int err;
1233
1234	map = map__new2(xm->start, kernel);
1235	if (!map)
1236		return -ENOMEM;
1237
1238	map__set_end(map, xm->end);
1239	map__set_pgoff(map, xm->pgoff);
1240
1241	kmap = map__kmap(map);
1242
1243	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1244
1245	err = maps__insert(machine__kernel_maps(machine), map);
1246
1247	if (!err) {
1248		pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1249			kmap->name, map__start(map), map__end(map));
1250	}
1251
1252	map__put(map);
1253
1254	return err;
1255}
1256
1257static u64 find_entry_trampoline(struct dso *dso)
1258{
1259	/* Duplicates are removed so lookup all aliases */
1260	const char *syms[] = {
1261		"_entry_trampoline",
1262		"__entry_trampoline_start",
1263		"entry_SYSCALL_64_trampoline",
1264	};
1265	struct symbol *sym = dso__first_symbol(dso);
1266	unsigned int i;
1267
1268	for (; sym; sym = dso__next_symbol(sym)) {
1269		if (sym->binding != STB_GLOBAL)
1270			continue;
1271		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1272			if (!strcmp(sym->name, syms[i]))
1273				return sym->start;
1274		}
1275	}
1276
1277	return 0;
1278}
1279
1280/*
1281 * These values can be used for kernels that do not have symbols for the entry
1282 * trampolines in kallsyms.
1283 */
1284#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1285#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1286#define X86_64_ENTRY_TRAMPOLINE		0x6000
1287
1288struct machine__map_x86_64_entry_trampolines_args {
1289	struct maps *kmaps;
1290	bool found;
1291};
1292
1293static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1294{
1295	struct machine__map_x86_64_entry_trampolines_args *args = data;
1296	struct map *dest_map;
1297	struct kmap *kmap = __map__kmap(map);
1298
1299	if (!kmap || !is_entry_trampoline(kmap->name))
1300		return 0;
1301
1302	dest_map = maps__find(args->kmaps, map__pgoff(map));
1303	if (dest_map != map)
1304		map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1305
1306	args->found = true;
1307	return 0;
1308}
1309
1310/* Map x86_64 PTI entry trampolines */
1311int machine__map_x86_64_entry_trampolines(struct machine *machine,
1312					  struct dso *kernel)
1313{
1314	struct machine__map_x86_64_entry_trampolines_args args = {
1315		.kmaps = machine__kernel_maps(machine),
1316		.found = false,
1317	};
1318	int nr_cpus_avail, cpu;
1319	u64 pgoff;
1320
1321	/*
1322	 * In the vmlinux case, pgoff is a virtual address which must now be
1323	 * mapped to a vmlinux offset.
1324	 */
1325	maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1326
1327	if (args.found || machine->trampolines_mapped)
1328		return 0;
1329
1330	pgoff = find_entry_trampoline(kernel);
1331	if (!pgoff)
1332		return 0;
1333
1334	nr_cpus_avail = machine__nr_cpus_avail(machine);
1335
1336	/* Add a 1 page map for each CPU's entry trampoline */
1337	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1338		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1339			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1340			 X86_64_ENTRY_TRAMPOLINE;
1341		struct extra_kernel_map xm = {
1342			.start = va,
1343			.end   = va + page_size,
1344			.pgoff = pgoff,
1345		};
1346
1347		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1348
1349		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1350			return -1;
1351	}
1352
1353	machine->trampolines_mapped = nr_cpus_avail;
1354
1355	return 0;
1356}
1357
1358int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1359					     struct dso *kernel __maybe_unused)
1360{
1361	return 0;
1362}
1363
1364static int
1365__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1366{
1367	/* In case of renewal of the kernel map, destroy the previous one */
1368	machine__destroy_kernel_maps(machine);
1369
1370	map__put(machine->vmlinux_map);
1371	machine->vmlinux_map = map__new2(0, kernel);
1372	if (machine->vmlinux_map == NULL)
1373		return -ENOMEM;
1374
1375	map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1376	return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1377}
1378
1379void machine__destroy_kernel_maps(struct machine *machine)
1380{
1381	struct kmap *kmap;
1382	struct map *map = machine__kernel_map(machine);
1383
1384	if (map == NULL)
1385		return;
1386
1387	kmap = map__kmap(map);
1388	maps__remove(machine__kernel_maps(machine), map);
1389	if (kmap && kmap->ref_reloc_sym) {
1390		zfree((char **)&kmap->ref_reloc_sym->name);
1391		zfree(&kmap->ref_reloc_sym);
1392	}
1393
1394	map__zput(machine->vmlinux_map);
1395}
1396
1397int machines__create_guest_kernel_maps(struct machines *machines)
1398{
1399	int ret = 0;
1400	struct dirent **namelist = NULL;
1401	int i, items = 0;
1402	char path[PATH_MAX];
1403	pid_t pid;
1404	char *endp;
1405
1406	if (symbol_conf.default_guest_vmlinux_name ||
1407	    symbol_conf.default_guest_modules ||
1408	    symbol_conf.default_guest_kallsyms) {
1409		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1410	}
1411
1412	if (symbol_conf.guestmount) {
1413		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1414		if (items <= 0)
1415			return -ENOENT;
1416		for (i = 0; i < items; i++) {
1417			if (!isdigit(namelist[i]->d_name[0])) {
1418				/* Filter out . and .. */
1419				continue;
1420			}
1421			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1422			if ((*endp != '\0') ||
1423			    (endp == namelist[i]->d_name) ||
1424			    (errno == ERANGE)) {
1425				pr_debug("invalid directory (%s). Skipping.\n",
1426					 namelist[i]->d_name);
1427				continue;
1428			}
1429			sprintf(path, "%s/%s/proc/kallsyms",
1430				symbol_conf.guestmount,
1431				namelist[i]->d_name);
1432			ret = access(path, R_OK);
1433			if (ret) {
1434				pr_debug("Can't access file %s\n", path);
1435				goto failure;
1436			}
1437			machines__create_kernel_maps(machines, pid);
1438		}
1439failure:
1440		free(namelist);
1441	}
1442
1443	return ret;
1444}
1445
1446void machines__destroy_kernel_maps(struct machines *machines)
1447{
1448	struct rb_node *next = rb_first_cached(&machines->guests);
1449
1450	machine__destroy_kernel_maps(&machines->host);
1451
1452	while (next) {
1453		struct machine *pos = rb_entry(next, struct machine, rb_node);
1454
1455		next = rb_next(&pos->rb_node);
1456		rb_erase_cached(&pos->rb_node, &machines->guests);
1457		machine__delete(pos);
1458	}
1459}
1460
1461int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1462{
1463	struct machine *machine = machines__findnew(machines, pid);
1464
1465	if (machine == NULL)
1466		return -1;
1467
1468	return machine__create_kernel_maps(machine);
1469}
1470
1471int machine__load_kallsyms(struct machine *machine, const char *filename)
1472{
1473	struct map *map = machine__kernel_map(machine);
1474	struct dso *dso = map__dso(map);
1475	int ret = __dso__load_kallsyms(dso, filename, map, true);
1476
1477	if (ret > 0) {
1478		dso__set_loaded(dso);
1479		/*
1480		 * Since /proc/kallsyms will have multiple sections for the
1481		 * kernel, with modules between them, fixup the end of all
1482		 * sections.
1483		 */
1484		maps__fixup_end(machine__kernel_maps(machine));
1485	}
1486
1487	return ret;
1488}
1489
1490int machine__load_vmlinux_path(struct machine *machine)
1491{
1492	struct map *map = machine__kernel_map(machine);
1493	struct dso *dso = map__dso(map);
1494	int ret = dso__load_vmlinux_path(dso, map);
1495
1496	if (ret > 0)
1497		dso__set_loaded(dso);
1498
1499	return ret;
1500}
1501
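/*
 * Read <root_dir>/proc/version and return a strdup()ed copy of the version
 * string that follows "Linux version ", or NULL on failure.
 */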
1502static char *get_kernel_version(const char *root_dir)
1503{
1504	char version[PATH_MAX];
1505	FILE *file;
1506	char *name, *tmp;
1507	const char *prefix = "Linux version ";
1508
1509	sprintf(version, "%s/proc/version", root_dir);
1510	file = fopen(version, "r");
1511	if (!file)
1512		return NULL;
1513
1514	tmp = fgets(version, sizeof(version), file);
1515	fclose(file);
1516	if (!tmp)
1517		return NULL;
1518
1519	name = strstr(version, prefix);
1520	if (!name)
1521		return NULL;
1522	name += strlen(prefix);
1523	tmp = strchr(name, ' ');
1524	if (tmp)
1525		*tmp = '\0';
1526
1527	return strdup(name);
1528}
1529
1530static bool is_kmod_dso(struct dso *dso)
1531{
1532	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1533	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1534}
1535
1536static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1537{
1538	char *long_name;
1539	struct dso *dso;
1540	struct map *map = maps__find_by_name(maps, m->name);
1541
1542	if (map == NULL)
1543		return 0;
1544
1545	long_name = strdup(path);
1546	if (long_name == NULL)
1547		return -ENOMEM;
1548
1549	dso = map__dso(map);
1550	dso__set_long_name(dso, long_name, true);
1551	dso__kernel_module_get_build_id(dso, "");
1552
1553	/*
1554	 * The full name could reveal kmod compression, so
1555	 * we need to update the symtab_type if needed.
1556	 */
1557	if (m->comp && is_kmod_dso(dso)) {
1558		dso->symtab_type++;
1559		dso->comp = m->comp;
1560	}
1561
1562	return 0;
1563}
1564
1565static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1566{
1567	struct dirent *dent;
1568	DIR *dir = opendir(dir_name);
1569	int ret = 0;
1570
1571	if (!dir) {
1572		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1573		return -1;
1574	}
1575
1576	while ((dent = readdir(dir)) != NULL) {
1577		char path[PATH_MAX];
1578		struct stat st;
1579
1580		/* sshfs might return bad dent->d_type, so we have to stat */
1581		path__join(path, sizeof(path), dir_name, dent->d_name);
1582		if (stat(path, &st))
1583			continue;
1584
1585		if (S_ISDIR(st.st_mode)) {
1586			if (!strcmp(dent->d_name, ".") ||
1587			    !strcmp(dent->d_name, ".."))
1588				continue;
1589
1590			/* Do not follow top-level source and build symlinks */
1591			if (depth == 0) {
1592				if (!strcmp(dent->d_name, "source") ||
1593				    !strcmp(dent->d_name, "build"))
1594					continue;
1595			}
1596
1597			ret = maps__set_modules_path_dir(maps, path, depth + 1);
1598			if (ret < 0)
1599				goto out;
1600		} else {
1601			struct kmod_path m;
1602
1603			ret = kmod_path__parse_name(&m, dent->d_name);
1604			if (ret)
1605				goto out;
1606
1607			if (m.kmod)
1608				ret = maps__set_module_path(maps, path, &m);
1609
1610			zfree(&m.name);
1611
1612			if (ret)
1613				goto out;
1614		}
1615	}
1616
1617out:
1618	closedir(dir);
1619	return ret;
1620}
1621
1622static int machine__set_modules_path(struct machine *machine)
1623{
1624	char *version;
1625	char modules_path[PATH_MAX];
1626
1627	version = get_kernel_version(machine->root_dir);
1628	if (!version)
1629		return -1;
1630
1631	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1632		 machine->root_dir, version);
1633	free(version);
1634
1635	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1636}
1637int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1638				u64 *size __maybe_unused,
1639				const char *name __maybe_unused)
1640{
1641	return 0;
1642}
1643
1644static int machine__create_module(void *arg, const char *name, u64 start,
1645				  u64 size)
1646{
1647	struct machine *machine = arg;
1648	struct map *map;
1649
1650	if (arch__fix_module_text_start(&start, &size, name) < 0)
1651		return -1;
1652
1653	map = machine__addnew_module_map(machine, start, name);
1654	if (map == NULL)
1655		return -1;
1656	map__set_end(map, start + size);
1657
1658	dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1659	map__put(map);
1660	return 0;
1661}
1662
1663static int machine__create_modules(struct machine *machine)
1664{
1665	const char *modules;
1666	char path[PATH_MAX];
1667
1668	if (machine__is_default_guest(machine)) {
1669		modules = symbol_conf.default_guest_modules;
1670	} else {
1671		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1672		modules = path;
1673	}
1674
1675	if (symbol__restricted_filename(modules, "/proc/modules"))
1676		return -1;
1677
1678	if (modules__parse(modules, machine, machine__create_module))
1679		return -1;
1680
1681	if (!machine__set_modules_path(machine))
1682		return 0;
1683
1684	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1685
1686	return 0;
1687}
1688
1689static void machine__set_kernel_mmap(struct machine *machine,
1690				     u64 start, u64 end)
1691{
1692	map__set_start(machine->vmlinux_map, start);
1693	map__set_end(machine->vmlinux_map, end);
1694	/*
1695	 * Be a bit paranoid here, some perf.data file came with
1696	 * a zero sized synthesized MMAP event for the kernel.
1697	 */
1698	if (start == 0 && end == 0)
1699		map__set_end(machine->vmlinux_map, ~0ULL);
1700}
1701
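/*
 * Change the kernel map's address range: remove it from the kernel maps and
 * re-insert it with the new start/end so the maps stay ordered by address.
 */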
1702static int machine__update_kernel_mmap(struct machine *machine,
1703				     u64 start, u64 end)
1704{
1705	struct map *orig, *updated;
1706	int err;
1707
1708	orig = machine->vmlinux_map;
1709	updated = map__get(orig);
1710
1711	machine->vmlinux_map = updated;
1712	machine__set_kernel_mmap(machine, start, end);
1713	maps__remove(machine__kernel_maps(machine), orig);
1714	err = maps__insert(machine__kernel_maps(machine), updated);
1715	map__put(orig);
1716
1717	return err;
1718}
1719
1720int machine__create_kernel_maps(struct machine *machine)
1721{
1722	struct dso *kernel = machine__get_kernel(machine);
1723	const char *name = NULL;
1724	u64 start = 0, end = ~0ULL;
1725	int ret;
1726
1727	if (kernel == NULL)
1728		return -1;
1729
1730	ret = __machine__create_kernel_maps(machine, kernel);
1731	if (ret < 0)
1732		goto out_put;
1733
1734	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1735		if (machine__is_host(machine))
1736			pr_debug("Problems creating module maps, "
1737				 "continuing anyway...\n");
1738		else
1739			pr_debug("Problems creating module maps for guest %d, "
1740				 "continuing anyway...\n", machine->pid);
1741	}
1742
1743	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1744		if (name &&
1745		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1746			machine__destroy_kernel_maps(machine);
1747			ret = -1;
1748			goto out_put;
1749		}
1750
1751		/*
1752		 * we have a real start address now, so re-order the kmaps
1753		 * assume it's the last in the kmaps
1754		 */
1755		ret = machine__update_kernel_mmap(machine, start, end);
1756		if (ret < 0)
1757			goto out_put;
1758	}
1759
1760	if (machine__create_extra_kernel_maps(machine, kernel))
1761		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1762
1763	if (end == ~0ULL) {
1764		/* update end address of the kernel map using adjacent module address */
1765		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1766							 machine__kernel_map(machine));
1767
1768		if (next)
1769			machine__set_kernel_mmap(machine, start, map__start(next));
1770	}
1771
1772out_put:
1773	dso__put(kernel);
1774	return ret;
1775}
1776
1777static bool machine__uses_kcore(struct machine *machine)
1778{
1779	struct dso *dso;
1780
1781	list_for_each_entry(dso, &machine->dsos.head, node) {
1782		if (dso__is_kcore(dso))
1783			return true;
1784	}
1785
1786	return false;
1787}
1788
1789static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1790					     struct extra_kernel_map *xm)
1791{
1792	return machine__is(machine, "x86_64") &&
1793	       is_entry_trampoline(xm->name);
1794}
1795
1796static int machine__process_extra_kernel_map(struct machine *machine,
1797					     struct extra_kernel_map *xm)
1798{
1799	struct dso *kernel = machine__kernel_dso(machine);
1800
1801	if (kernel == NULL)
1802		return -1;
1803
1804	return machine__create_extra_kernel_map(machine, kernel, xm);
1805}
1806
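/*
 * Handle a kernel-space mmap: module maps (paths or other bracketed names),
 * the main kernel mapping (matching mmap_name), and extra kernel maps such
 * as the x86_64 entry trampolines.
 */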
1807static int machine__process_kernel_mmap_event(struct machine *machine,
1808					      struct extra_kernel_map *xm,
1809					      struct build_id *bid)
1810{
1811	enum dso_space_type dso_space;
1812	bool is_kernel_mmap;
1813	const char *mmap_name = machine->mmap_name;
1814
1815	/* If we have maps from kcore then we do not need or want any others */
1816	if (machine__uses_kcore(machine))
1817		return 0;
1818
1819	if (machine__is_host(machine))
1820		dso_space = DSO_SPACE__KERNEL;
1821	else
1822		dso_space = DSO_SPACE__KERNEL_GUEST;
1823
1824	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1825	if (!is_kernel_mmap && !machine__is_host(machine)) {
1826		/*
1827		 * If the event was recorded inside the guest and injected into
1828		 * the host perf.data file, then it will match a host mmap_name,
1829		 * so try that - see machine__set_mmap_name().
1830		 */
1831		mmap_name = "[kernel.kallsyms]";
1832		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1833	}
1834	if (xm->name[0] == '/' ||
1835	    (!is_kernel_mmap && xm->name[0] == '[')) {
1836		struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1837
1838		if (map == NULL)
1839			goto out_problem;
1840
1841		map__set_end(map, map__start(map) + xm->end - xm->start);
1842
1843		if (build_id__is_defined(bid))
1844			dso__set_build_id(map__dso(map), bid);
1845
1846		map__put(map);
1847	} else if (is_kernel_mmap) {
1848		const char *symbol_name = xm->name + strlen(mmap_name);
1849		/*
1850		 * Should be there already, from the build-id table in
1851		 * the header.
1852		 */
1853		struct dso *kernel = NULL;
1854		struct dso *dso;
1855
1856		down_read(&machine->dsos.lock);
1857
1858		list_for_each_entry(dso, &machine->dsos.head, node) {
1859
1860			/*
1861			 * The cpumode passed to is_kernel_module is not the
1862			 * cpumode of *this* event. If we insist on passing
1863			 * correct cpumode to is_kernel_module, we should
1864			 * record the cpumode when adding this dso to the
1865			 * linked list.
1866			 *
1867			 * However we don't really need passing correct
1868			 * cpumode.  We know the correct cpumode must be kernel
1869			 * mode (if not, we should not link it onto kernel_dsos
1870			 * list).
1871			 *
1872			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1873			 * is_kernel_module() treats it as a kernel cpumode.
1874			 */
1875
1876			if (!dso->kernel ||
1877			    is_kernel_module(dso->long_name,
1878					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1879				continue;
1880
1881
1882			kernel = dso__get(dso);
1883			break;
1884		}
1885
1886		up_read(&machine->dsos.lock);
1887
1888		if (kernel == NULL)
1889			kernel = machine__findnew_dso(machine, machine->mmap_name);
1890		if (kernel == NULL)
1891			goto out_problem;
1892
1893		kernel->kernel = dso_space;
1894		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1895			dso__put(kernel);
1896			goto out_problem;
1897		}
1898
1899		if (strstr(kernel->long_name, "vmlinux"))
1900			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1901
1902		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1903			dso__put(kernel);
1904			goto out_problem;
1905		}
1906
1907		if (build_id__is_defined(bid))
1908			dso__set_build_id(kernel, bid);
1909
1910		/*
1911		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1912		 * symbol. Effectively having zero here means that at record
1913		 * time /proc/sys/kernel/kptr_restrict was non zero.
1914		 */
1915		if (xm->pgoff != 0) {
1916			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1917							symbol_name,
1918							xm->pgoff);
1919		}
1920
1921		if (machine__is_default_guest(machine)) {
1922			/*
1923			 * preload dso of guest kernel and modules
1924			 */
1925			dso__load(kernel, machine__kernel_map(machine));
1926		}
1927		dso__put(kernel);
1928	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1929		return machine__process_extra_kernel_map(machine, xm);
1930	}
1931	return 0;
1932out_problem:
1933	return -1;
1934}
1935
1936int machine__process_mmap2_event(struct machine *machine,
1937				 union perf_event *event,
1938				 struct perf_sample *sample)
1939{
1940	struct thread *thread;
1941	struct map *map;
1942	struct dso_id dso_id = {
1943		.maj = event->mmap2.maj,
1944		.min = event->mmap2.min,
1945		.ino = event->mmap2.ino,
1946		.ino_generation = event->mmap2.ino_generation,
1947	};
1948	struct build_id __bid, *bid = NULL;
1949	int ret = 0;
1950
1951	if (dump_trace)
1952		perf_event__fprintf_mmap2(event, stdout);
1953
1954	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1955		bid = &__bid;
1956		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1957	}
1958
1959	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1960	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1961		struct extra_kernel_map xm = {
1962			.start = event->mmap2.start,
1963			.end   = event->mmap2.start + event->mmap2.len,
1964			.pgoff = event->mmap2.pgoff,
1965		};
1966
1967		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1968		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1969		if (ret < 0)
1970			goto out_problem;
1971		return 0;
1972	}
1973
1974	thread = machine__findnew_thread(machine, event->mmap2.pid,
1975					event->mmap2.tid);
1976	if (thread == NULL)
1977		goto out_problem;
1978
1979	map = map__new(machine, event->mmap2.start,
1980			event->mmap2.len, event->mmap2.pgoff,
1981			&dso_id, event->mmap2.prot,
1982			event->mmap2.flags, bid,
1983			event->mmap2.filename, thread);
1984
1985	if (map == NULL)
1986		goto out_problem_map;
1987
1988	ret = thread__insert_map(thread, map);
1989	if (ret)
1990		goto out_problem_insert;
1991
1992	thread__put(thread);
1993	map__put(map);
1994	return 0;
1995
1996out_problem_insert:
1997	map__put(map);
1998out_problem_map:
1999	thread__put(thread);
2000out_problem:
2001	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
2002	return 0;
2003}
2004
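/*
 * Unlike PERF_RECORD_MMAP2 above, a plain PERF_RECORD_MMAP carries no
 * protection, flags, device/inode or build-id information, so no dso_id or
 * build-id is passed to map__new() and the mapping is treated as executable
 * unless PERF_RECORD_MISC_MMAP_DATA is set in the event header.
 */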
2005int machine__process_mmap_event(struct machine *machine, union perf_event *event,
2006				struct perf_sample *sample)
2007{
2008	struct thread *thread;
2009	struct map *map;
2010	u32 prot = 0;
2011	int ret = 0;
2012
2013	if (dump_trace)
2014		perf_event__fprintf_mmap(event, stdout);
2015
2016	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
2017	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
2018		struct extra_kernel_map xm = {
2019			.start = event->mmap.start,
2020			.end   = event->mmap.start + event->mmap.len,
2021			.pgoff = event->mmap.pgoff,
2022		};
2023
2024		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
2025		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
2026		if (ret < 0)
2027			goto out_problem;
2028		return 0;
2029	}
2030
2031	thread = machine__findnew_thread(machine, event->mmap.pid,
2032					 event->mmap.tid);
2033	if (thread == NULL)
2034		goto out_problem;
2035
2036	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
2037		prot = PROT_EXEC;
2038
2039	map = map__new(machine, event->mmap.start,
2040			event->mmap.len, event->mmap.pgoff,
2041			NULL, prot, 0, NULL, event->mmap.filename, thread);
2042
2043	if (map == NULL)
2044		goto out_problem_map;
2045
2046	ret = thread__insert_map(thread, map);
2047	if (ret)
2048		goto out_problem_insert;
2049
2050	thread__put(thread);
2051	map__put(map);
2052	return 0;
2053
2054out_problem_insert:
2055	map__put(map);
2056out_problem_map:
2057	thread__put(thread);
2058out_problem:
2059	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
2060	return 0;
2061}
2062
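/*
 * Unlink a thread from the machine's thread table: invalidate the last_match
 * cache if it points at this thread, erase its rb node (taking threads->lock
 * here unless the caller already holds it) and drop the table's reference to
 * the thread.
 */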
2063static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
2064				     struct thread *th, bool lock)
2065{
2066	struct threads *threads = machine__threads(machine, thread__tid(th));
2067
2068	if (!nd)
2069		nd = thread_rb_node__find(th, &threads->entries.rb_root);
2070
2071	if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th))
2072		threads__set_last_match(threads, NULL);
2073
2074	if (lock)
2075		down_write(&threads->lock);
2076
2077	BUG_ON(refcount_read(thread__refcnt(th)) == 0);
2078
2079	thread__put(nd->thread);
2080	rb_erase_cached(&nd->rb_node, &threads->entries);
2081	RB_CLEAR_NODE(&nd->rb_node);
2082	--threads->nr;
2083
2084	free(nd);
2085
2086	if (lock)
2087		up_write(&threads->lock);
2088}
2089
2090void machine__remove_thread(struct machine *machine, struct thread *th)
2091{
2092	return __machine__remove_thread(machine, NULL, th, true);
2093}
2094
2095int machine__process_fork_event(struct machine *machine, union perf_event *event,
2096				struct perf_sample *sample)
2097{
2098	struct thread *thread = machine__find_thread(machine,
2099						     event->fork.pid,
2100						     event->fork.tid);
2101	struct thread *parent = machine__findnew_thread(machine,
2102							event->fork.ppid,
2103							event->fork.ptid);
2104	bool do_maps_clone = true;
2105	int err = 0;
2106
2107	if (dump_trace)
2108		perf_event__fprintf_task(event, stdout);
2109
2110	/*
2111	 * There may be an existing thread that is not actually the parent,
2112	 * either because we are processing events out of order, or because the
2113	 * (fork) event that would have removed the thread was lost. Assume the
2114	 * latter case and continue on as best we can.
2115	 */
2116	if (thread__pid(parent) != (pid_t)event->fork.ppid) {
2117		dump_printf("removing erroneous parent thread %d/%d\n",
2118			    thread__pid(parent), thread__tid(parent));
2119		machine__remove_thread(machine, parent);
2120		thread__put(parent);
2121		parent = machine__findnew_thread(machine, event->fork.ppid,
2122						 event->fork.ptid);
2123	}
2124
2125	/* if a thread currently exists for the thread id remove it */
2126	if (thread != NULL) {
2127		machine__remove_thread(machine, thread);
2128		thread__put(thread);
2129	}
2130
2131	thread = machine__findnew_thread(machine, event->fork.pid,
2132					 event->fork.tid);
2133	/*
2134	 * When synthesizing FORK events, we are trying to create thread
2135	 * objects for the already running tasks on the machine.
2136	 *
2137	 * Normally, for a kernel FORK event, we want to clone the parent's
2138	 * maps because that is what the kernel just did.
2139	 *
2140	 * But when synthesizing, this should not be done.  If we do, we end up
2141	 * with overlapping maps as we process the synthesized MMAP2 events that
2142	 * get delivered shortly thereafter.
2143	 *
2144	 * Use the FORK event misc flags in an internal way to signal this
2145	 * situation, so we can elide the map clone when appropriate.
2146	 */
2147	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
2148		do_maps_clone = false;
2149
2150	if (thread == NULL || parent == NULL ||
2151	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2152		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
2153		err = -1;
2154	}
2155	thread__put(thread);
2156	thread__put(parent);
2157
2158	return err;
2159}
2160
2161int machine__process_exit_event(struct machine *machine, union perf_event *event,
2162				struct perf_sample *sample __maybe_unused)
2163{
2164	struct thread *thread = machine__find_thread(machine,
2165						     event->fork.pid,
2166						     event->fork.tid);
2167
2168	if (dump_trace)
2169		perf_event__fprintf_task(event, stdout);
2170
2171	if (thread != NULL) {
2172		if (symbol_conf.keep_exited_threads)
2173			thread__set_exited(thread, /*exited=*/true);
2174		else
2175			machine__remove_thread(machine, thread);
2176	}
2177	thread__put(thread);
2178	return 0;
2179}
2180
2181int machine__process_event(struct machine *machine, union perf_event *event,
2182			   struct perf_sample *sample)
2183{
2184	int ret;
2185
2186	switch (event->header.type) {
2187	case PERF_RECORD_COMM:
2188		ret = machine__process_comm_event(machine, event, sample); break;
2189	case PERF_RECORD_MMAP:
2190		ret = machine__process_mmap_event(machine, event, sample); break;
2191	case PERF_RECORD_NAMESPACES:
2192		ret = machine__process_namespaces_event(machine, event, sample); break;
2193	case PERF_RECORD_CGROUP:
2194		ret = machine__process_cgroup_event(machine, event, sample); break;
2195	case PERF_RECORD_MMAP2:
2196		ret = machine__process_mmap2_event(machine, event, sample); break;
2197	case PERF_RECORD_FORK:
2198		ret = machine__process_fork_event(machine, event, sample); break;
2199	case PERF_RECORD_EXIT:
2200		ret = machine__process_exit_event(machine, event, sample); break;
2201	case PERF_RECORD_LOST:
2202		ret = machine__process_lost_event(machine, event, sample); break;
2203	case PERF_RECORD_AUX:
2204		ret = machine__process_aux_event(machine, event); break;
2205	case PERF_RECORD_ITRACE_START:
2206		ret = machine__process_itrace_start_event(machine, event); break;
2207	case PERF_RECORD_LOST_SAMPLES:
2208		ret = machine__process_lost_samples_event(machine, event, sample); break;
2209	case PERF_RECORD_SWITCH:
2210	case PERF_RECORD_SWITCH_CPU_WIDE:
2211		ret = machine__process_switch_event(machine, event); break;
2212	case PERF_RECORD_KSYMBOL:
2213		ret = machine__process_ksymbol(machine, event, sample); break;
2214	case PERF_RECORD_BPF_EVENT:
2215		ret = machine__process_bpf(machine, event, sample); break;
2216	case PERF_RECORD_TEXT_POKE:
2217		ret = machine__process_text_poke(machine, event, sample); break;
2218	case PERF_RECORD_AUX_OUTPUT_HW_ID:
2219		ret = machine__process_aux_output_hw_id_event(machine, event); break;
2220	default:
2221		ret = -1;
2222		break;
2223	}
2224
2225	return ret;
2226}
2227
2228static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2229{
2230	return regexec(regex, sym->name, 0, NULL, 0) == 0;
2231}
2232
2233static void ip__resolve_ams(struct thread *thread,
2234			    struct addr_map_symbol *ams,
2235			    u64 ip)
2236{
2237	struct addr_location al;
2238
2239	addr_location__init(&al);
2240	/*
2241	 * We cannot use the header.misc hint to determine whether a
2242	 * branch stack address is user, kernel, guest or hypervisor.
2243	 * Branches may straddle the kernel/user/hypervisor boundaries,
2244	 * so we have to try each cpumode in turn until we find a match;
2245	 * otherwise the symbol remains unknown.
2246	 */
2247	thread__find_cpumode_addr_location(thread, ip, &al);
2248
2249	ams->addr = ip;
2250	ams->al_addr = al.addr;
2251	ams->al_level = al.level;
2252	ams->ms.maps = maps__get(al.maps);
2253	ams->ms.sym = al.sym;
2254	ams->ms.map = map__get(al.map);
2255	ams->phys_addr = 0;
2256	ams->data_page_size = 0;
2257	addr_location__exit(&al);
2258}
2259
2260static void ip__resolve_data(struct thread *thread,
2261			     u8 m, struct addr_map_symbol *ams,
2262			     u64 addr, u64 phys_addr, u64 daddr_page_size)
2263{
2264	struct addr_location al;
2265
2266	addr_location__init(&al);
2267
2268	thread__find_symbol(thread, m, addr, &al);
2269
2270	ams->addr = addr;
2271	ams->al_addr = al.addr;
2272	ams->al_level = al.level;
2273	ams->ms.maps = maps__get(al.maps);
2274	ams->ms.sym = al.sym;
2275	ams->ms.map = map__get(al.map);
2276	ams->phys_addr = phys_addr;
2277	ams->data_page_size = daddr_page_size;
2278	addr_location__exit(&al);
2279}
2280
2281struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2282				     struct addr_location *al)
2283{
2284	struct mem_info *mi = mem_info__new();
2285
2286	if (!mi)
2287		return NULL;
2288
2289	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2290	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2291			 sample->addr, sample->phys_addr,
2292			 sample->data_page_size);
2293	mi->data_src.val = sample->data_src;
2294
2295	return mi;
2296}
2297
2298static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2299{
2300	struct map *map = ms->map;
2301	char *srcline = NULL;
2302	struct dso *dso;
2303
2304	if (!map || callchain_param.key == CCKEY_FUNCTION)
2305		return srcline;
2306
2307	dso = map__dso(map);
2308	srcline = srcline__tree_find(&dso->srclines, ip);
2309	if (!srcline) {
2310		bool show_sym = false;
2311		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2312
2313		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2314				      ms->sym, show_sym, show_addr, ip);
2315		srcline__tree_insert(&dso->srclines, ip, srcline);
2316	}
2317
2318	return srcline;
2319}
2320
2321struct iterations {
2322	int nr_loop_iter;
2323	u64 cycles;
2324};
2325
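/*
 * Append one ip to the callchain cursor. Values at or above PERF_CONTEXT_MAX
 * are not addresses but context markers (PERF_CONTEXT_HV/KERNEL/USER) that
 * switch the cpumode used to resolve the entries that follow; an unknown
 * marker is treated as a corrupted callchain and resets the cursor.
 */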
2326static int add_callchain_ip(struct thread *thread,
2327			    struct callchain_cursor *cursor,
2328			    struct symbol **parent,
2329			    struct addr_location *root_al,
2330			    u8 *cpumode,
2331			    u64 ip,
2332			    bool branch,
2333			    struct branch_flags *flags,
2334			    struct iterations *iter,
2335			    u64 branch_from)
2336{
2337	struct map_symbol ms = {};
2338	struct addr_location al;
2339	int nr_loop_iter = 0, err = 0;
2340	u64 iter_cycles = 0;
2341	const char *srcline = NULL;
2342
2343	addr_location__init(&al);
2344	al.filtered = 0;
2345	al.sym = NULL;
2346	al.srcline = NULL;
2347	if (!cpumode) {
2348		thread__find_cpumode_addr_location(thread, ip, &al);
2349	} else {
2350		if (ip >= PERF_CONTEXT_MAX) {
2351			switch (ip) {
2352			case PERF_CONTEXT_HV:
2353				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2354				break;
2355			case PERF_CONTEXT_KERNEL:
2356				*cpumode = PERF_RECORD_MISC_KERNEL;
2357				break;
2358			case PERF_CONTEXT_USER:
2359				*cpumode = PERF_RECORD_MISC_USER;
2360				break;
2361			default:
2362				pr_debug("invalid callchain context: "
2363					 "%"PRId64"\n", (s64) ip);
2364				/*
2365				 * It seems the callchain is corrupted.
2366				 * Discard all.
2367				 */
2368				callchain_cursor_reset(cursor);
2369				err = 1;
2370				goto out;
2371			}
2372			goto out;
2373		}
2374		thread__find_symbol(thread, *cpumode, ip, &al);
2375	}
2376
2377	if (al.sym != NULL) {
2378		if (perf_hpp_list.parent && !*parent &&
2379		    symbol__match_regex(al.sym, &parent_regex))
2380			*parent = al.sym;
2381		else if (have_ignore_callees && root_al &&
2382		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2383			/* Treat this symbol as the root,
2384			   forgetting its callees. */
2385			addr_location__copy(root_al, &al);
2386			callchain_cursor_reset(cursor);
2387		}
2388	}
2389
2390	if (symbol_conf.hide_unresolved && al.sym == NULL)
2391		goto out;
2392
2393	if (iter) {
2394		nr_loop_iter = iter->nr_loop_iter;
2395		iter_cycles = iter->cycles;
2396	}
2397
2398	ms.maps = maps__get(al.maps);
2399	ms.map = map__get(al.map);
2400	ms.sym = al.sym;
2401	srcline = callchain_srcline(&ms, al.addr);
2402	err = callchain_cursor_append(cursor, ip, &ms,
2403				      branch, flags, nr_loop_iter,
2404				      iter_cycles, branch_from, srcline);
2405out:
2406	addr_location__exit(&al);
2407	map_symbol__exit(&ms);
2408	return err;
2409}
2410
2411struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2412					   struct addr_location *al)
2413{
2414	unsigned int i;
2415	const struct branch_stack *bs = sample->branch_stack;
2416	struct branch_entry *entries = perf_sample__branch_entries(sample);
2417	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2418
2419	if (!bi)
2420		return NULL;
2421
2422	for (i = 0; i < bs->nr; i++) {
2423		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2424		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2425		bi[i].flags = entries[i].flags;
2426	}
2427	return bi;
2428}
2429
2430static void save_iterations(struct iterations *iter,
2431			    struct branch_entry *be, int nr)
2432{
2433	int i;
2434
2435	iter->nr_loop_iter++;
2436	iter->cycles = 0;
2437
2438	for (i = 0; i < nr; i++)
2439		iter->cycles += be[i].flags.cycles;
2440}
2441
2442#define CHASHSZ 127
2443#define CHASHBITS 7
2444#define NO_ENTRY 0xff
2445
2446#define PERF_MAX_BRANCH_DEPTH 127
2447
2448/* Remove loops. */
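/*
 * Each branch 'from' address is hashed into chash[]; when the same 'from'
 * address repeats and the entries in between match the ones that follow, the
 * duplicate run is dropped and accounted for as an extra loop iteration (with
 * its cycle counts) in the corresponding iterations entry instead.
 */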
2449static int remove_loops(struct branch_entry *l, int nr,
2450			struct iterations *iter)
2451{
2452	int i, j, off;
2453	unsigned char chash[CHASHSZ];
2454
2455	memset(chash, NO_ENTRY, sizeof(chash));
2456
2457	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2458
2459	for (i = 0; i < nr; i++) {
2460		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2461
2462		/* no collision handling for now */
2463		if (chash[h] == NO_ENTRY) {
2464			chash[h] = i;
2465		} else if (l[chash[h]].from == l[i].from) {
2466			bool is_loop = true;
2467			/* check if it is a real loop */
2468			off = 0;
2469			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2470				if (l[j].from != l[i + off].from) {
2471					is_loop = false;
2472					break;
2473				}
2474			if (is_loop) {
2475				j = nr - (i + off);
2476				if (j > 0) {
2477					save_iterations(iter + i + off,
2478						l + i, off);
2479
2480					memmove(iter + i, iter + i + off,
2481						j * sizeof(*iter));
2482
2483					memmove(l + i, l + i + off,
2484						j * sizeof(*l));
2485				}
2486
2487				nr -= off;
2488			}
2489		}
2490	}
2491	return nr;
2492}
2493
2494static int lbr_callchain_add_kernel_ip(struct thread *thread,
2495				       struct callchain_cursor *cursor,
2496				       struct perf_sample *sample,
2497				       struct symbol **parent,
2498				       struct addr_location *root_al,
2499				       u64 branch_from,
2500				       bool callee, int end)
2501{
2502	struct ip_callchain *chain = sample->callchain;
2503	u8 cpumode = PERF_RECORD_MISC_USER;
2504	int err, i;
2505
2506	if (callee) {
2507		for (i = 0; i < end + 1; i++) {
2508			err = add_callchain_ip(thread, cursor, parent,
2509					       root_al, &cpumode, chain->ips[i],
2510					       false, NULL, NULL, branch_from);
2511			if (err)
2512				return err;
2513		}
2514		return 0;
2515	}
2516
2517	for (i = end; i >= 0; i--) {
2518		err = add_callchain_ip(thread, cursor, parent,
2519				       root_al, &cpumode, chain->ips[i],
2520				       false, NULL, NULL, branch_from);
2521		if (err)
2522			return err;
2523	}
2524
2525	return 0;
2526}
2527
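/*
 * Remember the cursor node that was just appended for LBR entry idx in
 * lbr_stitch->prev_lbr_cursor[], so that a later sample whose LBR stack
 * overlaps this one can splice the saved nodes back into its callchain
 * (see has_stitched_lbr() and lbr_callchain_add_stitched_lbr_ip()).
 */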
2528static void save_lbr_cursor_node(struct thread *thread,
2529				 struct callchain_cursor *cursor,
2530				 int idx)
2531{
2532	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2533
2534	if (!lbr_stitch)
2535		return;
2536
2537	if (cursor->pos == cursor->nr) {
2538		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2539		return;
2540	}
2541
2542	if (!cursor->curr)
2543		cursor->curr = cursor->first;
2544	else
2545		cursor->curr = cursor->curr->next;
2546	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2547	       sizeof(struct callchain_cursor_node));
2548
2549	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2550	cursor->pos++;
2551}
2552
2553static int lbr_callchain_add_lbr_ip(struct thread *thread,
2554				    struct callchain_cursor *cursor,
2555				    struct perf_sample *sample,
2556				    struct symbol **parent,
2557				    struct addr_location *root_al,
2558				    u64 *branch_from,
2559				    bool callee)
2560{
2561	struct branch_stack *lbr_stack = sample->branch_stack;
2562	struct branch_entry *entries = perf_sample__branch_entries(sample);
2563	u8 cpumode = PERF_RECORD_MISC_USER;
2564	int lbr_nr = lbr_stack->nr;
2565	struct branch_flags *flags;
2566	int err, i;
2567	u64 ip;
2568
2569	/*
2570	 * The curr and pos fields are not used during the writing session;
2571	 * they are cleared in callchain_cursor_commit() when it is closed.
2572	 * Use curr and pos here to track the current cursor node.
2573	 */
2574	if (thread__lbr_stitch(thread)) {
2575		cursor->curr = NULL;
2576		cursor->pos = cursor->nr;
2577		if (cursor->nr) {
2578			cursor->curr = cursor->first;
2579			for (i = 0; i < (int)(cursor->nr - 1); i++)
2580				cursor->curr = cursor->curr->next;
2581		}
2582	}
2583
2584	if (callee) {
2585		/* Add LBR ip from first entries.to */
2586		ip = entries[0].to;
2587		flags = &entries[0].flags;
2588		*branch_from = entries[0].from;
2589		err = add_callchain_ip(thread, cursor, parent,
2590				       root_al, &cpumode, ip,
2591				       true, flags, NULL,
2592				       *branch_from);
2593		if (err)
2594			return err;
2595
2596		/*
2597		 * The number of cursor nodes has increased, so advance the
2598		 * current cursor node. There is no need to save the cursor
2599		 * node for entry 0: it is impossible to stitch the whole
2600		 * LBR stack of the previous sample.
2601		 */
2602		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2603			if (!cursor->curr)
2604				cursor->curr = cursor->first;
2605			else
2606				cursor->curr = cursor->curr->next;
2607			cursor->pos++;
2608		}
2609
2610		/* Add LBR ip from entries.from one by one. */
2611		for (i = 0; i < lbr_nr; i++) {
2612			ip = entries[i].from;
2613			flags = &entries[i].flags;
2614			err = add_callchain_ip(thread, cursor, parent,
2615					       root_al, &cpumode, ip,
2616					       true, flags, NULL,
2617					       *branch_from);
2618			if (err)
2619				return err;
2620			save_lbr_cursor_node(thread, cursor, i);
2621		}
2622		return 0;
2623	}
2624
2625	/* Add LBR ip from entries.from one by one. */
2626	for (i = lbr_nr - 1; i >= 0; i--) {
2627		ip = entries[i].from;
2628		flags = &entries[i].flags;
2629		err = add_callchain_ip(thread, cursor, parent,
2630				       root_al, &cpumode, ip,
2631				       true, flags, NULL,
2632				       *branch_from);
2633		if (err)
2634			return err;
2635		save_lbr_cursor_node(thread, cursor, i);
2636	}
2637
2638	if (lbr_nr > 0) {
2639		/* Add LBR ip from first entries.to */
2640		ip = entries[0].to;
2641		flags = &entries[0].flags;
2642		*branch_from = entries[0].from;
2643		err = add_callchain_ip(thread, cursor, parent,
2644				root_al, &cpumode, ip,
2645				true, flags, NULL,
2646				*branch_from);
2647		if (err)
2648			return err;
2649	}
2650
2651	return 0;
2652}
2653
2654static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2655					     struct callchain_cursor *cursor)
2656{
2657	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2658	struct callchain_cursor_node *cnode;
2659	struct stitch_list *stitch_node;
2660	int err;
2661
2662	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2663		cnode = &stitch_node->cursor;
2664
2665		err = callchain_cursor_append(cursor, cnode->ip,
2666					      &cnode->ms,
2667					      cnode->branch,
2668					      &cnode->branch_flags,
2669					      cnode->nr_loop_iter,
2670					      cnode->iter_cycles,
2671					      cnode->branch_from,
2672					      cnode->srcline);
2673		if (err)
2674			return err;
2675	}
2676	return 0;
2677}
2678
2679static struct stitch_list *get_stitch_node(struct thread *thread)
2680{
2681	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2682	struct stitch_list *stitch_node;
2683
2684	if (!list_empty(&lbr_stitch->free_lists)) {
2685		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2686					       struct stitch_list, node);
2687		list_del(&stitch_node->node);
2688
2689		return stitch_node;
2690	}
2691
2692	return malloc(sizeof(struct stitch_list));
2693}
2694
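/*
 * Decide whether LBR entries from the previous sample can be stitched onto
 * the current one. The hardware index (hw_idx) lines up the physical LBR
 * registers of both samples; if the entries at the base of the current stack
 * match the corresponding previous entries (same from, to and flags), the
 * older entries of the previous sample are queued on lbr_stitch->lists to be
 * stitched in later.
 */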
2695static bool has_stitched_lbr(struct thread *thread,
2696			     struct perf_sample *cur,
2697			     struct perf_sample *prev,
2698			     unsigned int max_lbr,
2699			     bool callee)
2700{
2701	struct branch_stack *cur_stack = cur->branch_stack;
2702	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2703	struct branch_stack *prev_stack = prev->branch_stack;
2704	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2705	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2706	int i, j, nr_identical_branches = 0;
2707	struct stitch_list *stitch_node;
2708	u64 cur_base, distance;
2709
2710	if (!cur_stack || !prev_stack)
2711		return false;
2712
2713	/* Find the physical index of the base-of-stack for current sample. */
2714	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2715
2716	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2717						     (max_lbr + prev_stack->hw_idx - cur_base);
2718	/* The previous sample has a shorter stack. Nothing can be stitched. */
2719	if (distance + 1 > prev_stack->nr)
2720		return false;
2721
2722	/*
2723	 * Check if there are identical LBRs between two samples.
2724	 * Identical LBRs must have same from, to and flags values. Also,
2725	 * they have to be saved in the same LBR registers (same physical
2726	 * index).
2727	 *
2728	 * Starts from the base-of-stack of current sample.
2729	 */
2730	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2731		if ((prev_entries[i].from != cur_entries[j].from) ||
2732		    (prev_entries[i].to != cur_entries[j].to) ||
2733		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2734			break;
2735		nr_identical_branches++;
2736	}
2737
2738	if (!nr_identical_branches)
2739		return false;
2740
2741	/*
2742	 * Save the LBRs between the base-of-stack of previous sample
2743	 * and the base-of-stack of current sample into lbr_stitch->lists.
2744	 * These LBRs will be stitched later.
2745	 */
2746	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2747
2748		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2749			continue;
2750
2751		stitch_node = get_stitch_node(thread);
2752		if (!stitch_node)
2753			return false;
2754
2755		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2756		       sizeof(struct callchain_cursor_node));
2757
2758		if (callee)
2759			list_add(&stitch_node->node, &lbr_stitch->lists);
2760		else
2761			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2762	}
2763
2764	return true;
2765}
2766
2767static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2768{
2769	if (thread__lbr_stitch(thread))
2770		return true;
2771
2772	thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2773	if (!thread__lbr_stitch(thread))
2774		goto err;
2775
2776	thread__lbr_stitch(thread)->prev_lbr_cursor =
2777		calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2778	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2779		goto free_lbr_stitch;
2780
2781	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2782	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2783
2784	return true;
2785
2786free_lbr_stitch:
2787	free(thread__lbr_stitch(thread));
2788	thread__set_lbr_stitch(thread, NULL);
2789err:
2790	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2791	thread__set_lbr_stitch_enable(thread, false);
2792	return false;
2793}
2794
2795/*
2796 * Resolve an LBR callstack chain sample.
2797 * Return:
2798 * 1 on success: LBR callchain information was obtained
2799 * 0 when no LBR callchain information is available: fall back to fp
2800 * negative error code on other errors.
2801 */
2802static int resolve_lbr_callchain_sample(struct thread *thread,
2803					struct callchain_cursor *cursor,
2804					struct perf_sample *sample,
2805					struct symbol **parent,
2806					struct addr_location *root_al,
2807					int max_stack,
2808					unsigned int max_lbr)
2809{
2810	bool callee = (callchain_param.order == ORDER_CALLEE);
2811	struct ip_callchain *chain = sample->callchain;
2812	int chain_nr = min(max_stack, (int)chain->nr), i;
2813	struct lbr_stitch *lbr_stitch;
2814	bool stitched_lbr = false;
2815	u64 branch_from = 0;
2816	int err;
2817
2818	for (i = 0; i < chain_nr; i++) {
2819		if (chain->ips[i] == PERF_CONTEXT_USER)
2820			break;
2821	}
2822
2823	/* LBR only affects the user callchain */
2824	if (i == chain_nr)
2825		return 0;
2826
2827	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2828	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2829		lbr_stitch = thread__lbr_stitch(thread);
2830
2831		stitched_lbr = has_stitched_lbr(thread, sample,
2832						&lbr_stitch->prev_sample,
2833						max_lbr, callee);
2834
2835		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2836			list_replace_init(&lbr_stitch->lists,
2837					  &lbr_stitch->free_lists);
2838		}
2839		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2840	}
2841
2842	if (callee) {
2843		/* Add kernel ip */
2844		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2845						  parent, root_al, branch_from,
2846						  true, i);
2847		if (err)
2848			goto error;
2849
2850		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2851					       root_al, &branch_from, true);
2852		if (err)
2853			goto error;
2854
2855		if (stitched_lbr) {
2856			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2857			if (err)
2858				goto error;
2859		}
2860
2861	} else {
2862		if (stitched_lbr) {
2863			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2864			if (err)
2865				goto error;
2866		}
2867		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2868					       root_al, &branch_from, false);
2869		if (err)
2870			goto error;
2871
2872		/* Add kernel ip */
2873		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2874						  parent, root_al, branch_from,
2875						  false, i);
2876		if (err)
2877			goto error;
2878	}
2879	return 1;
2880
2881error:
2882	return (err < 0) ? err : 0;
2883}
2884
2885static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2886			     struct callchain_cursor *cursor,
2887			     struct symbol **parent,
2888			     struct addr_location *root_al,
2889			     u8 *cpumode, int ent)
2890{
2891	int err = 0;
2892
2893	while (--ent >= 0) {
2894		u64 ip = chain->ips[ent];
2895
2896		if (ip >= PERF_CONTEXT_MAX) {
2897			err = add_callchain_ip(thread, cursor, parent,
2898					       root_al, cpumode, ip,
2899					       false, NULL, NULL, 0);
2900			break;
2901		}
2902	}
2903	return err;
2904}
2905
2906static u64 get_leaf_frame_caller(struct perf_sample *sample,
2907		struct thread *thread, int usr_idx)
2908{
2909	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2910		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2911	else
2912		return 0;
2913}
2914
2915static int thread__resolve_callchain_sample(struct thread *thread,
2916					    struct callchain_cursor *cursor,
2917					    struct evsel *evsel,
2918					    struct perf_sample *sample,
2919					    struct symbol **parent,
2920					    struct addr_location *root_al,
2921					    int max_stack)
2922{
2923	struct branch_stack *branch = sample->branch_stack;
2924	struct branch_entry *entries = perf_sample__branch_entries(sample);
2925	struct ip_callchain *chain = sample->callchain;
2926	int chain_nr = 0;
2927	u8 cpumode = PERF_RECORD_MISC_USER;
2928	int i, j, err, nr_entries, usr_idx;
2929	int skip_idx = -1;
2930	int first_call = 0;
2931	u64 leaf_frame_caller;
2932
2933	if (chain)
2934		chain_nr = chain->nr;
2935
2936	if (evsel__has_branch_callstack(evsel)) {
2937		struct perf_env *env = evsel__env(evsel);
2938
2939		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2940						   root_al, max_stack,
2941						   !env ? 0 : env->max_branches);
2942		if (err)
2943			return (err < 0) ? err : 0;
2944	}
2945
2946	/*
2947	 * Based on DWARF debug information, some architectures skip
2948	 * a callchain entry saved by the kernel.
2949	 */
2950	skip_idx = arch_skip_callchain_idx(thread, chain);
2951
2952	/*
2953	 * Add branches to call stack for easier browsing. This gives
2954	 * more context for a sample than just the callers.
2955	 *
2956	 * This uses individual histograms of paths compared to the
2957	 * aggregated histograms the normal LBR mode uses.
2958	 *
2959	 * Limitations for now:
2960	 * - No extra filters
2961	 * - No annotations (should annotate somehow)
2962	 */
2963
2964	if (branch && callchain_param.branch_callstack) {
2965		int nr = min(max_stack, (int)branch->nr);
2966		struct branch_entry be[nr];
2967		struct iterations iter[nr];
2968
2969		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2970			pr_warning("corrupted branch chain. skipping...\n");
2971			goto check_calls;
2972		}
2973
2974		for (i = 0; i < nr; i++) {
2975			if (callchain_param.order == ORDER_CALLEE) {
2976				be[i] = entries[i];
2977
2978				if (chain == NULL)
2979					continue;
2980
2981				/*
2982				 * Check for overlap into the callchain.
2983				 * The return address is one off compared to
2984				 * the branch entry. To adjust for this
2985				 * assume the calling instruction is not longer
2986				 * than 8 bytes.
2987				 */
2988				if (i == skip_idx ||
2989				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2990					first_call++;
2991				else if (be[i].from < chain->ips[first_call] &&
2992				    be[i].from >= chain->ips[first_call] - 8)
2993					first_call++;
2994			} else
2995				be[i] = entries[branch->nr - i - 1];
2996		}
2997
2998		memset(iter, 0, sizeof(struct iterations) * nr);
2999		nr = remove_loops(be, nr, iter);
3000
3001		for (i = 0; i < nr; i++) {
3002			err = add_callchain_ip(thread, cursor, parent,
3003					       root_al,
3004					       NULL, be[i].to,
3005					       true, &be[i].flags,
3006					       NULL, be[i].from);
3007
3008			if (!err)
3009				err = add_callchain_ip(thread, cursor, parent, root_al,
3010						       NULL, be[i].from,
3011						       true, &be[i].flags,
3012						       &iter[i], 0);
3013			if (err == -EINVAL)
3014				break;
3015			if (err)
3016				return err;
3017		}
3018
3019		if (chain_nr == 0)
3020			return 0;
3021
3022		chain_nr -= nr;
3023	}
3024
3025check_calls:
3026	if (chain && callchain_param.order != ORDER_CALLEE) {
3027		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
3028					&cpumode, chain->nr - first_call);
3029		if (err)
3030			return (err < 0) ? err : 0;
3031	}
3032	for (i = first_call, nr_entries = 0;
3033	     i < chain_nr && nr_entries < max_stack; i++) {
3034		u64 ip;
3035
3036		if (callchain_param.order == ORDER_CALLEE)
3037			j = i;
3038		else
3039			j = chain->nr - i - 1;
3040
3041#ifdef HAVE_SKIP_CALLCHAIN_IDX
3042		if (j == skip_idx)
3043			continue;
3044#endif
3045		ip = chain->ips[j];
3046		if (ip < PERF_CONTEXT_MAX)
3047			++nr_entries;
3048		else if (callchain_param.order != ORDER_CALLEE) {
3049			err = find_prev_cpumode(chain, thread, cursor, parent,
3050						root_al, &cpumode, j);
3051			if (err)
3052				return (err < 0) ? err : 0;
3053			continue;
3054		}
3055
3056		/*
3057		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
3058		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
3059		 * the index will be different in order to add the missing frame
3060		 * at the right place.
3061		 */
3062
3063		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
3064
3065		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
3066
3067			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
3068
3069			/*
3070			 * Check that leaf_frame_caller != ip so the same value
3071			 * is not added twice.
3072			 */
3073
3074			if (leaf_frame_caller && leaf_frame_caller != ip) {
3075
3076				err = add_callchain_ip(thread, cursor, parent,
3077					       root_al, &cpumode, leaf_frame_caller,
3078					       false, NULL, NULL, 0);
3079				if (err)
3080					return (err < 0) ? err : 0;
3081			}
3082		}
3083
3084		err = add_callchain_ip(thread, cursor, parent,
3085				       root_al, &cpumode, ip,
3086				       false, NULL, NULL, 0);
3087
3088		if (err)
3089			return (err < 0) ? err : 0;
3090	}
3091
3092	return 0;
3093}
3094
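/*
 * Expand inlined call frames for ip: look up (or parse and cache) the inline
 * chain for the address in dso->inlined_nodes and append one cursor entry per
 * inlined function. A zero return tells the caller the frames were appended,
 * so it must not add a duplicate entry for ip itself.
 */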
3095static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
3096{
3097	struct symbol *sym = ms->sym;
3098	struct map *map = ms->map;
3099	struct inline_node *inline_node;
3100	struct inline_list *ilist;
3101	struct dso *dso;
3102	u64 addr;
3103	int ret = 1;
3104	struct map_symbol ilist_ms;
3105
3106	if (!symbol_conf.inline_name || !map || !sym)
3107		return ret;
3108
3109	addr = map__dso_map_ip(map, ip);
3110	addr = map__rip_2objdump(map, addr);
3111	dso = map__dso(map);
3112
3113	inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
3114	if (!inline_node) {
3115		inline_node = dso__parse_addr_inlines(dso, addr, sym);
3116		if (!inline_node)
3117			return ret;
3118		inlines__tree_insert(&dso->inlined_nodes, inline_node);
3119	}
3120
3121	ilist_ms = (struct map_symbol) {
3122		.maps = maps__get(ms->maps),
3123		.map = map__get(map),
3124	};
3125	list_for_each_entry(ilist, &inline_node->val, list) {
3126		ilist_ms.sym = ilist->symbol;
3127		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
3128					      NULL, 0, 0, 0, ilist->srcline);
3129
3130		if (ret != 0)
3131			return ret;
3132	}
3133	map_symbol__exit(&ilist_ms);
3134
3135	return ret;
3136}
3137
3138static int unwind_entry(struct unwind_entry *entry, void *arg)
3139{
3140	struct callchain_cursor *cursor = arg;
3141	const char *srcline = NULL;
3142	u64 addr = entry->ip;
3143
3144	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
3145		return 0;
3146
3147	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
3148		return 0;
3149
3150	/*
3151	 * Convert entry->ip from a virtual address to an offset in
3152	 * its corresponding binary.
3153	 */
3154	if (entry->ms.map)
3155		addr = map__dso_map_ip(entry->ms.map, entry->ip);
3156
3157	srcline = callchain_srcline(&entry->ms, addr);
3158	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
3159				       false, NULL, 0, 0, 0, srcline);
3160}
3161
3162static int thread__resolve_callchain_unwind(struct thread *thread,
3163					    struct callchain_cursor *cursor,
3164					    struct evsel *evsel,
3165					    struct perf_sample *sample,
3166					    int max_stack)
3167{
3168	/* Can we do dwarf post unwind? */
3169	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
3170	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3171		return 0;
3172
3173	/* Bail out if nothing was captured. */
3174	if ((!sample->user_regs.regs) ||
3175	    (!sample->user_stack.size))
3176		return 0;
3177
3178	return unwind__get_entries(unwind_entry, cursor,
3179				   thread, sample, max_stack, false);
3180}
3181
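/*
 * Fill the cursor from both sources: the sampled callchain (including LBR and
 * branch data) and, when user registers and stack were captured, the DWARF
 * post unwind entries. For ORDER_CALLEE the sampled callchain is appended
 * first, otherwise the unwind entries come first, keeping the cursor in the
 * requested order.
 */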
3182int thread__resolve_callchain(struct thread *thread,
3183			      struct callchain_cursor *cursor,
3184			      struct evsel *evsel,
3185			      struct perf_sample *sample,
3186			      struct symbol **parent,
3187			      struct addr_location *root_al,
3188			      int max_stack)
3189{
3190	int ret = 0;
3191
3192	if (cursor == NULL)
3193		return -ENOMEM;
3194
3195	callchain_cursor_reset(cursor);
3196
3197	if (callchain_param.order == ORDER_CALLEE) {
3198		ret = thread__resolve_callchain_sample(thread, cursor,
3199						       evsel, sample,
3200						       parent, root_al,
3201						       max_stack);
3202		if (ret)
3203			return ret;
3204		ret = thread__resolve_callchain_unwind(thread, cursor,
3205						       evsel, sample,
3206						       max_stack);
3207	} else {
3208		ret = thread__resolve_callchain_unwind(thread, cursor,
3209						       evsel, sample,
3210						       max_stack);
3211		if (ret)
3212			return ret;
3213		ret = thread__resolve_callchain_sample(thread, cursor,
3214						       evsel, sample,
3215						       parent, root_al,
3216						       max_stack);
3217	}
3218
3219	return ret;
3220}
3221
3222int machine__for_each_thread(struct machine *machine,
3223			     int (*fn)(struct thread *thread, void *p),
3224			     void *priv)
3225{
3226	struct threads *threads;
3227	struct rb_node *nd;
3228	int rc = 0;
3229	int i;
3230
3231	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3232		threads = &machine->threads[i];
3233		for (nd = rb_first_cached(&threads->entries); nd;
3234		     nd = rb_next(nd)) {
3235			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
3236
3237			rc = fn(trb->thread, priv);
3238			if (rc != 0)
3239				return rc;
3240		}
3241	}
3242	return rc;
3243}
3244
3245int machines__for_each_thread(struct machines *machines,
3246			      int (*fn)(struct thread *thread, void *p),
3247			      void *priv)
3248{
3249	struct rb_node *nd;
3250	int rc = 0;
3251
3252	rc = machine__for_each_thread(&machines->host, fn, priv);
3253	if (rc != 0)
3254		return rc;
3255
3256	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3257		struct machine *machine = rb_entry(nd, struct machine, rb_node);
3258
3259		rc = machine__for_each_thread(machine, fn, priv);
3260		if (rc != 0)
3261			return rc;
3262	}
3263	return rc;
3264}
3265
3266pid_t machine__get_current_tid(struct machine *machine, int cpu)
3267{
3268	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3269		return -1;
3270
3271	return machine->current_tid[cpu];
3272}
3273
3274int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3275			     pid_t tid)
3276{
3277	struct thread *thread;
3278	const pid_t init_val = -1;
3279
3280	if (cpu < 0)
3281		return -EINVAL;
3282
3283	if (realloc_array_as_needed(machine->current_tid,
3284				    machine->current_tid_sz,
3285				    (unsigned int)cpu,
3286				    &init_val))
3287		return -ENOMEM;
3288
3289	machine->current_tid[cpu] = tid;
3290
3291	thread = machine__findnew_thread(machine, pid, tid);
3292	if (!thread)
3293		return -ENOMEM;
3294
3295	thread__set_cpu(thread, cpu);
3296	thread__put(thread);
3297
3298	return 0;
3299}
3300
3301/*
3302 * Compares the raw arch string. N.B. see instead perf_env__arch() or
3303 * machine__normalized_is() if a normalized arch is needed.
3304 */
3305bool machine__is(struct machine *machine, const char *arch)
3306{
3307	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3308}
3309
3310bool machine__normalized_is(struct machine *machine, const char *arch)
3311{
3312	return machine && !strcmp(perf_env__arch(machine->env), arch);
3313}
3314
3315int machine__nr_cpus_avail(struct machine *machine)
3316{
3317	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3318}
3319
3320int machine__get_kernel_start(struct machine *machine)
3321{
3322	struct map *map = machine__kernel_map(machine);
3323	int err = 0;
3324
3325	/*
3326	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3327	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3328	 * all addresses including kernel addresses are less than 2^32.  In
3329	 * that case (32-bit system), if the kernel mapping is unknown, all
3330	 * addresses will be assumed to be in user space - see
3331	 * machine__kernel_ip().
3332	 */
3333	machine->kernel_start = 1ULL << 63;
3334	if (map) {
3335		err = map__load(map);
3336		/*
3337		 * On x86_64, PTI entry trampolines are less than the
3338		 * start of kernel text, but still above 2^63. So leave
3339		 * kernel_start = 1ULL << 63 for x86_64.
3340		 */
3341		if (!err && !machine__is(machine, "x86_64"))
3342			machine->kernel_start = map__start(map);
3343	}
3344	return err;
3345}
3346
3347u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3348{
3349	u8 addr_cpumode = cpumode;
3350	bool kernel_ip;
3351
3352	if (!machine->single_address_space)
3353		goto out;
3354
3355	kernel_ip = machine__kernel_ip(machine, addr);
3356	switch (cpumode) {
3357	case PERF_RECORD_MISC_KERNEL:
3358	case PERF_RECORD_MISC_USER:
3359		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3360					   PERF_RECORD_MISC_USER;
3361		break;
3362	case PERF_RECORD_MISC_GUEST_KERNEL:
3363	case PERF_RECORD_MISC_GUEST_USER:
3364		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3365					   PERF_RECORD_MISC_GUEST_USER;
3366		break;
3367	default:
3368		break;
3369	}
3370out:
3371	return addr_cpumode;
3372}
3373
3374struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3375{
3376	return dsos__findnew_id(&machine->dsos, filename, id);
3377}
3378
3379struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3380{
3381	return machine__findnew_dso_id(machine, filename, NULL);
3382}
3383
3384char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3385{
3386	struct machine *machine = vmachine;
3387	struct map *map;
3388	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3389
3390	if (sym == NULL)
3391		return NULL;
3392
3393	*modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
3394	*addrp = map__unmap_ip(map, sym->start);
3395	return sym->name;
3396}
3397
3398int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3399{
3400	struct dso *pos;
3401	int err = 0;
3402
3403	list_for_each_entry(pos, &machine->dsos.head, node) {
3404		if (fn(pos, machine, priv))
3405			err = -1;
3406	}
3407	return err;
3408}
3409
3410int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3411{
3412	struct maps *maps = machine__kernel_maps(machine);
3413
3414	return maps__for_each_map(maps, fn, priv);
3415}
3416
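/*
 * Lazily resolve and cache the boundaries of the kernel's sched and lock text
 * sections on first use; sched.text_start == 1 is kept as a sentinel meaning
 * the kernel symbols could not be resolved. An address inside either section
 * is considered a locking function: mutex and rwsem code lives in the sched
 * text section, spinlock code in the lock text section.
 */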
3417bool machine__is_lock_function(struct machine *machine, u64 addr)
3418{
3419	if (!machine->sched.text_start) {
3420		struct map *kmap;
3421		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3422
3423		if (!sym) {
3424			/* to avoid retry */
3425			machine->sched.text_start = 1;
3426			return false;
3427		}
3428
3429		machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3430
3431		/* should not fail from here */
3432		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3433		machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3434
3435		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3436		machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3437
3438		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3439		machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3440	}
3441
3442	/* failed to get kernel symbols */
3443	if (machine->sched.text_start == 1)
3444		return false;
3445
3446	/* mutex and rwsem functions are in sched text section */
3447	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3448		return true;
3449
3450	/* spinlock functions are in lock text section */
3451	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3452		return true;
3453
3454	return false;
3455}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2#include <dirent.h>
   3#include <errno.h>
   4#include <inttypes.h>
   5#include <regex.h>
   6#include <stdlib.h>
   7#include "callchain.h"
   8#include "debug.h"
   9#include "dso.h"
  10#include "env.h"
  11#include "event.h"
  12#include "evsel.h"
  13#include "hist.h"
  14#include "machine.h"
  15#include "map.h"
  16#include "map_symbol.h"
  17#include "branch.h"
  18#include "mem-events.h"
  19#include "path.h"
  20#include "srcline.h"
  21#include "symbol.h"
  22#include "sort.h"
  23#include "strlist.h"
  24#include "target.h"
  25#include "thread.h"
  26#include "util.h"
  27#include "vdso.h"
  28#include <stdbool.h>
  29#include <sys/types.h>
  30#include <sys/stat.h>
  31#include <unistd.h>
  32#include "unwind.h"
  33#include "linux/hash.h"
  34#include "asm/bug.h"
  35#include "bpf-event.h"
  36#include <internal/lib.h> // page_size
  37#include "cgroup.h"
  38#include "arm64-frame-pointer-unwind-support.h"
  39
  40#include <linux/ctype.h>
  41#include <symbol/kallsyms.h>
  42#include <linux/mman.h>
  43#include <linux/string.h>
  44#include <linux/zalloc.h>
  45
  46static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
 
  47
  48static struct dso *machine__kernel_dso(struct machine *machine)
  49{
  50	return machine->vmlinux_map->dso;
  51}
  52
  53static void dsos__init(struct dsos *dsos)
  54{
  55	INIT_LIST_HEAD(&dsos->head);
  56	dsos->root = RB_ROOT;
  57	init_rwsem(&dsos->lock);
  58}
  59
  60static void machine__threads_init(struct machine *machine)
  61{
  62	int i;
  63
  64	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
  65		struct threads *threads = &machine->threads[i];
  66		threads->entries = RB_ROOT_CACHED;
  67		init_rwsem(&threads->lock);
  68		threads->nr = 0;
  69		INIT_LIST_HEAD(&threads->dead);
  70		threads->last_match = NULL;
  71	}
  72}
  73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  74static int machine__set_mmap_name(struct machine *machine)
  75{
  76	if (machine__is_host(machine))
  77		machine->mmap_name = strdup("[kernel.kallsyms]");
  78	else if (machine__is_default_guest(machine))
  79		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
  80	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
  81			  machine->pid) < 0)
  82		machine->mmap_name = NULL;
  83
  84	return machine->mmap_name ? 0 : -ENOMEM;
  85}
  86
  87static void thread__set_guest_comm(struct thread *thread, pid_t pid)
  88{
  89	char comm[64];
  90
  91	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
  92	thread__set_comm(thread, comm, 0);
  93}
  94
  95int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
  96{
  97	int err = -ENOMEM;
  98
  99	memset(machine, 0, sizeof(*machine));
 100	machine->kmaps = maps__new(machine);
 101	if (machine->kmaps == NULL)
 102		return -ENOMEM;
 103
 104	RB_CLEAR_NODE(&machine->rb_node);
 105	dsos__init(&machine->dsos);
 106
 107	machine__threads_init(machine);
 108
 109	machine->vdso_info = NULL;
 110	machine->env = NULL;
 111
 112	machine->pid = pid;
 113
 114	machine->id_hdr_size = 0;
 115	machine->kptr_restrict_warned = false;
 116	machine->comm_exec = false;
 117	machine->kernel_start = 0;
 118	machine->vmlinux_map = NULL;
 119
 120	machine->root_dir = strdup(root_dir);
 121	if (machine->root_dir == NULL)
 122		goto out;
 123
 124	if (machine__set_mmap_name(machine))
 125		goto out;
 126
 127	if (pid != HOST_KERNEL_ID) {
 128		struct thread *thread = machine__findnew_thread(machine, -1,
 129								pid);
 130
 131		if (thread == NULL)
 132			goto out;
 133
 134		thread__set_guest_comm(thread, pid);
 135		thread__put(thread);
 136	}
 137
 138	machine->current_tid = NULL;
 139	err = 0;
 140
 141out:
 142	if (err) {
 143		zfree(&machine->kmaps);
 144		zfree(&machine->root_dir);
 145		zfree(&machine->mmap_name);
 146	}
 147	return 0;
 148}
 149
 150struct machine *machine__new_host(void)
 151{
 152	struct machine *machine = malloc(sizeof(*machine));
 153
 154	if (machine != NULL) {
 155		machine__init(machine, "", HOST_KERNEL_ID);
 156
 157		if (machine__create_kernel_maps(machine) < 0)
 158			goto out_delete;
 159	}
 160
 161	return machine;
 162out_delete:
 163	free(machine);
 164	return NULL;
 165}
 166
 167struct machine *machine__new_kallsyms(void)
 168{
 169	struct machine *machine = machine__new_host();
 170	/*
 171	 * FIXME:
 172	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
 173	 *    ask for not using the kcore parsing code, once this one is fixed
 174	 *    to create a map per module.
 175	 */
 176	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
 177		machine__delete(machine);
 178		machine = NULL;
 179	}
 180
 181	return machine;
 182}
 183
 184static void dsos__purge(struct dsos *dsos)
 185{
 186	struct dso *pos, *n;
 187
 188	down_write(&dsos->lock);
 189
 190	list_for_each_entry_safe(pos, n, &dsos->head, node) {
 191		RB_CLEAR_NODE(&pos->rb_node);
 192		pos->root = NULL;
 193		list_del_init(&pos->node);
 194		dso__put(pos);
 195	}
 196
 197	up_write(&dsos->lock);
 198}
 199
 200static void dsos__exit(struct dsos *dsos)
 201{
 202	dsos__purge(dsos);
 203	exit_rwsem(&dsos->lock);
 204}
 205
 206void machine__delete_threads(struct machine *machine)
 207{
 208	struct rb_node *nd;
 209	int i;
 210
 211	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 212		struct threads *threads = &machine->threads[i];
 213		down_write(&threads->lock);
 214		nd = rb_first_cached(&threads->entries);
 215		while (nd) {
 216			struct thread *t = rb_entry(nd, struct thread, rb_node);
 217
 218			nd = rb_next(nd);
 219			__machine__remove_thread(machine, t, false);
 220		}
 221		up_write(&threads->lock);
 222	}
 223}
 224
 225void machine__exit(struct machine *machine)
 226{
 227	int i;
 228
 229	if (machine == NULL)
 230		return;
 231
 232	machine__destroy_kernel_maps(machine);
 233	maps__delete(machine->kmaps);
 234	dsos__exit(&machine->dsos);
 235	machine__exit_vdso(machine);
 236	zfree(&machine->root_dir);
 237	zfree(&machine->mmap_name);
 238	zfree(&machine->current_tid);
 239	zfree(&machine->kallsyms_filename);
 240
 
 241	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 242		struct threads *threads = &machine->threads[i];
 243		struct thread *thread, *n;
 244		/*
 245		 * Forget about the dead, at this point whatever threads were
 246		 * left in the dead lists better have a reference count taken
 247		 * by who is using them, and then, when they drop those references
 248		 * and it finally hits zero, thread__put() will check and see that
 249		 * its not in the dead threads list and will not try to remove it
 250		 * from there, just calling thread__delete() straight away.
 251		 */
 252		list_for_each_entry_safe(thread, n, &threads->dead, node)
 253			list_del_init(&thread->node);
 254
 255		exit_rwsem(&threads->lock);
 256	}
 257}
 258
 259void machine__delete(struct machine *machine)
 260{
 261	if (machine) {
 262		machine__exit(machine);
 263		free(machine);
 264	}
 265}
 266
 267void machines__init(struct machines *machines)
 268{
 269	machine__init(&machines->host, "", HOST_KERNEL_ID);
 270	machines->guests = RB_ROOT_CACHED;
 271}
 272
 273void machines__exit(struct machines *machines)
 274{
 275	machine__exit(&machines->host);
 276	/* XXX exit guest */
 277}
 278
 279struct machine *machines__add(struct machines *machines, pid_t pid,
 280			      const char *root_dir)
 281{
 282	struct rb_node **p = &machines->guests.rb_root.rb_node;
 283	struct rb_node *parent = NULL;
 284	struct machine *pos, *machine = malloc(sizeof(*machine));
 285	bool leftmost = true;
 286
 287	if (machine == NULL)
 288		return NULL;
 289
 290	if (machine__init(machine, root_dir, pid) != 0) {
 291		free(machine);
 292		return NULL;
 293	}
 294
 295	while (*p != NULL) {
 296		parent = *p;
 297		pos = rb_entry(parent, struct machine, rb_node);
 298		if (pid < pos->pid)
 299			p = &(*p)->rb_left;
 300		else {
 301			p = &(*p)->rb_right;
 302			leftmost = false;
 303		}
 304	}
 305
 306	rb_link_node(&machine->rb_node, parent, p);
 307	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
 308
 309	machine->machines = machines;
 310
 311	return machine;
 312}
 313
 314void machines__set_comm_exec(struct machines *machines, bool comm_exec)
 315{
 316	struct rb_node *nd;
 317
 318	machines->host.comm_exec = comm_exec;
 319
 320	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 321		struct machine *machine = rb_entry(nd, struct machine, rb_node);
 322
 323		machine->comm_exec = comm_exec;
 324	}
 325}
 326
 327struct machine *machines__find(struct machines *machines, pid_t pid)
 328{
 329	struct rb_node **p = &machines->guests.rb_root.rb_node;
 330	struct rb_node *parent = NULL;
 331	struct machine *machine;
 332	struct machine *default_machine = NULL;
 333
 334	if (pid == HOST_KERNEL_ID)
 335		return &machines->host;
 336
 337	while (*p != NULL) {
 338		parent = *p;
 339		machine = rb_entry(parent, struct machine, rb_node);
 340		if (pid < machine->pid)
 341			p = &(*p)->rb_left;
 342		else if (pid > machine->pid)
 343			p = &(*p)->rb_right;
 344		else
 345			return machine;
 346		if (!machine->pid)
 347			default_machine = machine;
 348	}
 349
 350	return default_machine;
 351}
 352
 353struct machine *machines__findnew(struct machines *machines, pid_t pid)
 354{
 355	char path[PATH_MAX];
 356	const char *root_dir = "";
 357	struct machine *machine = machines__find(machines, pid);
 358
 359	if (machine && (machine->pid == pid))
 360		goto out;
 361
 362	if ((pid != HOST_KERNEL_ID) &&
 363	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
 364	    (symbol_conf.guestmount)) {
 365		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
 366		if (access(path, R_OK)) {
 367			static struct strlist *seen;
 368
 369			if (!seen)
 370				seen = strlist__new(NULL, NULL);
 371
 372			if (!strlist__has_entry(seen, path)) {
 373				pr_err("Can't access file %s\n", path);
 374				strlist__add(seen, path);
 375			}
 376			machine = NULL;
 377			goto out;
 378		}
 379		root_dir = path;
 380	}
 381
 382	machine = machines__add(machines, pid, root_dir);
 383out:
 384	return machine;
 385}
 386
 387struct machine *machines__find_guest(struct machines *machines, pid_t pid)
 388{
 389	struct machine *machine = machines__find(machines, pid);
 390
 391	if (!machine)
 392		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
 393	return machine;
 394}
 395
 396/*
 397 * A common case for KVM test programs is that the test program acts as the
 398 * hypervisor, creating, running and destroying the virtual machine, and
 399 * providing the guest object code from its own object code. In this case,
 400 * the VM is not running an OS, but only the functions loaded into it by the
 401 * hypervisor test program, and conveniently, loaded at the same virtual
 402 * addresses.
 403 *
 404 * Normally to resolve addresses, MMAP events are needed to map addresses
 405 * back to the object code and debug symbols for that object code.
 406 *
 407 * Currently, there is no way to get such mapping information from guests
 408 * but, in the scenario described above, the guest has the same mappings
 409 * as the hypervisor, so support for that scenario can be achieved.
 410 *
 411 * To support that, copy the host thread's maps to the guest thread's maps.
 412 * Note, we do not discover the guest until we encounter a guest event,
 413 * which works well because it is not until then that we know that the host
 414 * thread's maps have been set up.
 415 *
 416 * This function returns the guest thread. Apart from keeping the data
 417 * structures sane, using a thread belonging to the guest machine, instead
 418 * of the host thread, allows it to have its own comm (refer
 419 * thread__set_guest_comm()).
 420 */
 421static struct thread *findnew_guest_code(struct machine *machine,
 422					 struct machine *host_machine,
 423					 pid_t pid)
 424{
 425	struct thread *host_thread;
 426	struct thread *thread;
 427	int err;
 428
 429	if (!machine)
 430		return NULL;
 431
 432	thread = machine__findnew_thread(machine, -1, pid);
 433	if (!thread)
 434		return NULL;
 435
 436	/* Assume maps are set up if there are any */
 437	if (thread->maps->nr_maps)
 438		return thread;
 439
 440	host_thread = machine__find_thread(host_machine, -1, pid);
 441	if (!host_thread)
 442		goto out_err;
 443
 444	thread__set_guest_comm(thread, pid);
 445
 446	/*
 447	 * Guest code can be found in hypervisor process at the same address
 448	 * so copy host maps.
 449	 */
 450	err = maps__clone(thread, host_thread->maps);
 451	thread__put(host_thread);
 452	if (err)
 453		goto out_err;
 454
 455	return thread;
 456
 457out_err:
 458	thread__zput(thread);
 459	return NULL;
 460}
 461
 462struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
 463{
 464	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
 465	struct machine *machine = machines__findnew(machines, pid);
 466
 467	return findnew_guest_code(machine, host_machine, pid);
 468}
 469
 470struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
 471{
 472	struct machines *machines = machine->machines;
 473	struct machine *host_machine;
 474
 475	if (!machines)
 476		return NULL;
 477
 478	host_machine = machines__find(machines, HOST_KERNEL_ID);
 479
 480	return findnew_guest_code(machine, host_machine, pid);
 481}
 482
 483void machines__process_guests(struct machines *machines,
 484			      machine__process_t process, void *data)
 485{
 486	struct rb_node *nd;
 487
 488	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 489		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 490		process(pos, data);
 491	}
 492}
 493
 494void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 495{
 496	struct rb_node *node;
 497	struct machine *machine;
 498
 499	machines->host.id_hdr_size = id_hdr_size;
 500
 501	for (node = rb_first_cached(&machines->guests); node;
 502	     node = rb_next(node)) {
 503		machine = rb_entry(node, struct machine, rb_node);
 504		machine->id_hdr_size = id_hdr_size;
 505	}
 506
 507	return;
 508}
 509
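/*
 * Descriptive note on the helper below: fill in the pid of a thread that was
 * created with an unknown pid (-1). Unless the thread is itself the group
 * leader, make it share the leader's maps, discarding any maps it wrongly
 * accumulated while its pid was unknown.
 */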
 510static void machine__update_thread_pid(struct machine *machine,
 511				       struct thread *th, pid_t pid)
 512{
 513	struct thread *leader;
 514
 515	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
 516		return;
 517
 518	th->pid_ = pid;
 519
 520	if (th->pid_ == th->tid)
 521		return;
 522
 523	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
 524	if (!leader)
 525		goto out_err;
 526
 527	if (!leader->maps)
 528		leader->maps = maps__new(machine);
 529
 530	if (!leader->maps)
 531		goto out_err;
 532
 533	if (th->maps == leader->maps)
 534		return;
 535
 536	if (th->maps) {
 537		/*
 538		 * Maps are created from MMAP events which provide the pid and
 539		 * tid.  Consequently there never should be any maps on a thread
 540		 * with an unknown pid.  Just print an error if there are.
 541		 */
 542		if (!maps__empty(th->maps))
 543			pr_err("Discarding thread maps for %d:%d\n",
 544			       th->pid_, th->tid);
 545		maps__put(th->maps);
 546	}
 547
 548	th->maps = maps__get(leader->maps);
 549out_put:
 550	thread__put(leader);
 551	return;
 552out_err:
 553	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
 554	goto out_put;
 555}
 556
 557/*
 558 * Front-end cache - TID lookups come in blocks,
 559 * so most of the time we don't have to look up
 560 * the full rbtree:
 561 */
 562static struct thread*
 563__threads__get_last_match(struct threads *threads, struct machine *machine,
 564			  int pid, int tid)
 565{
 566	struct thread *th;
 567
 568	th = threads->last_match;
 569	if (th != NULL) {
 570		if (th->tid == tid) {
 571			machine__update_thread_pid(machine, th, pid);
 572			return thread__get(th);
 573		}
 574
 575		threads->last_match = NULL;
 576	}
 577
 578	return NULL;
 579}
 580
 581static struct thread*
 582threads__get_last_match(struct threads *threads, struct machine *machine,
 583			int pid, int tid)
 584{
 585	struct thread *th = NULL;
 586
 587	if (perf_singlethreaded)
 588		th = __threads__get_last_match(threads, machine, pid, tid);
 589
 590	return th;
 591}
 592
 593static void
 594__threads__set_last_match(struct threads *threads, struct thread *th)
 595{
 596	threads->last_match = th;
 597}
 598
 599static void
 600threads__set_last_match(struct threads *threads, struct thread *th)
 601{
 602	if (perf_singlethreaded)
 603		__threads__set_last_match(threads, th);
 604}
 605
 606/*
 607 * The caller must eventually drop the thread->refcnt taken by a successful
 608 * lookup or by inserting a new thread.
 609 */
 610static struct thread *____machine__findnew_thread(struct machine *machine,
 611						  struct threads *threads,
 612						  pid_t pid, pid_t tid,
 613						  bool create)
 614{
 615	struct rb_node **p = &threads->entries.rb_root.rb_node;
 616	struct rb_node *parent = NULL;
 617	struct thread *th;
 618	bool leftmost = true;
 619
 620	th = threads__get_last_match(threads, machine, pid, tid);
 621	if (th)
 622		return th;
 623
 624	while (*p != NULL) {
 625		parent = *p;
 626		th = rb_entry(parent, struct thread, rb_node);
 627
 628		if (th->tid == tid) {
 629			threads__set_last_match(threads, th);
 630			machine__update_thread_pid(machine, th, pid);
 631			return thread__get(th);
 632		}
 633
 634		if (tid < th->tid)
 635			p = &(*p)->rb_left;
 636		else {
 637			p = &(*p)->rb_right;
 638			leftmost = false;
 639		}
 640	}
 641
 642	if (!create)
 643		return NULL;
 644
 645	th = thread__new(pid, tid);
 646	if (th != NULL) {
 647		rb_link_node(&th->rb_node, parent, p);
 648		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
 649
 650		/*
 651		 * We have to initialize maps separately after rb tree is updated.
 652		 *
 653		 * The reason is that we call machine__findnew_thread
 654		 * within thread__init_maps to find the thread
 655		 * leader and that would screw up the rb tree.
 656		 */
 657		if (thread__init_maps(th, machine)) {
 658			rb_erase_cached(&th->rb_node, &threads->entries);
 659			RB_CLEAR_NODE(&th->rb_node);
 660			thread__put(th);
 661			return NULL;
 662		}
 663		/*
 664		 * It is now in the rbtree, get a ref
 665		 */
 666		thread__get(th);
 667		threads__set_last_match(threads, th);
 668		++threads->nr;
 669	}
 670
 671	return th;
 672}
 673
 674struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
 675{
 676	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
 677}
 678
 679struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
 680				       pid_t tid)
 681{
 682	struct threads *threads = machine__threads(machine, tid);
 683	struct thread *th;
 684
 685	down_write(&threads->lock);
 686	th = __machine__findnew_thread(machine, pid, tid);
 687	up_write(&threads->lock);
 688	return th;
 689}
 690
 691struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 692				    pid_t tid)
 693{
 694	struct threads *threads = machine__threads(machine, tid);
 695	struct thread *th;
 696
 697	down_read(&threads->lock);
 698	th =  ____machine__findnew_thread(machine, threads, pid, tid, false);
 699	up_read(&threads->lock);
 700	return th;
 701}
 702
 703/*
 704 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 705 * So here a single thread is created for that, but actually there is a separate
 706 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 707 * is only 1. That causes problems for some tools, requiring workarounds. For
 708 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 709 */
 710struct thread *machine__idle_thread(struct machine *machine)
 711{
 712	struct thread *thread = machine__findnew_thread(machine, 0, 0);
 713
 714	if (!thread || thread__set_comm(thread, "swapper", 0) ||
 715	    thread__set_namespaces(thread, 0, NULL))
 716		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
 717
 718	return thread;
 719}
 720
 721struct comm *machine__thread_exec_comm(struct machine *machine,
 722				       struct thread *thread)
 723{
 724	if (machine->comm_exec)
 725		return thread__exec_comm(thread);
 726	else
 727		return thread__comm(thread);
 728}
 729
 730int machine__process_comm_event(struct machine *machine, union perf_event *event,
 731				struct perf_sample *sample)
 732{
 733	struct thread *thread = machine__findnew_thread(machine,
 734							event->comm.pid,
 735							event->comm.tid);
 736	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
 737	int err = 0;
 738
 739	if (exec)
 740		machine->comm_exec = true;
 741
 742	if (dump_trace)
 743		perf_event__fprintf_comm(event, stdout);
 744
 745	if (thread == NULL ||
 746	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
 747		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 748		err = -1;
 749	}
 750
 751	thread__put(thread);
 752
 753	return err;
 754}
 755
 756int machine__process_namespaces_event(struct machine *machine __maybe_unused,
 757				      union perf_event *event,
 758				      struct perf_sample *sample __maybe_unused)
 759{
 760	struct thread *thread = machine__findnew_thread(machine,
 761							event->namespaces.pid,
 762							event->namespaces.tid);
 763	int err = 0;
 764
 765	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
 766		  "\nWARNING: kernel seems to support more namespaces than perf"
 767		  " tool.\nTry updating the perf tool..\n\n");
 768
 769	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
 770		  "\nWARNING: perf tool seems to support more namespaces than"
 771		  " the kernel.\nTry updating the kernel..\n\n");
 772
 773	if (dump_trace)
 774		perf_event__fprintf_namespaces(event, stdout);
 775
 776	if (thread == NULL ||
 777	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
 778		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
 779		err = -1;
 780	}
 781
 782	thread__put(thread);
 783
 784	return err;
 785}
 786
 787int machine__process_cgroup_event(struct machine *machine,
 788				  union perf_event *event,
 789				  struct perf_sample *sample __maybe_unused)
 790{
 791	struct cgroup *cgrp;
 792
 793	if (dump_trace)
 794		perf_event__fprintf_cgroup(event, stdout);
 795
 796	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
 797	if (cgrp == NULL)
 798		return -ENOMEM;
 799
 800	return 0;
 801}
 802
 803int machine__process_lost_event(struct machine *machine __maybe_unused,
 804				union perf_event *event, struct perf_sample *sample __maybe_unused)
 805{
 806	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
 807		    event->lost.id, event->lost.lost);
 808	return 0;
 809}
 810
 811int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
 812					union perf_event *event, struct perf_sample *sample)
 813{
 814	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
 815		    sample->id, event->lost_samples.lost);
 816	return 0;
 817}
 818
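/*
 * Look up or create the DSO for a kernel module. A reference is taken on the
 * returned dso; the caller is responsible for the corresponding dso__put().
 */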
 819static struct dso *machine__findnew_module_dso(struct machine *machine,
 820					       struct kmod_path *m,
 821					       const char *filename)
 822{
 823	struct dso *dso;
 824
 825	down_write(&machine->dsos.lock);
 826
 827	dso = __dsos__find(&machine->dsos, m->name, true);
 828	if (!dso) {
 829		dso = __dsos__addnew(&machine->dsos, m->name);
 830		if (dso == NULL)
 831			goto out_unlock;
 832
 833		dso__set_module_info(dso, m, machine);
 834		dso__set_long_name(dso, strdup(filename), true);
 835		dso->kernel = DSO_SPACE__KERNEL;
 836	}
 837
 838	dso__get(dso);
 839out_unlock:
 840	up_write(&machine->dsos.lock);
 841	return dso;
 842}
 843
 844int machine__process_aux_event(struct machine *machine __maybe_unused,
 845			       union perf_event *event)
 846{
 847	if (dump_trace)
 848		perf_event__fprintf_aux(event, stdout);
 849	return 0;
 850}
 851
 852int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
 853					union perf_event *event)
 854{
 855	if (dump_trace)
 856		perf_event__fprintf_itrace_start(event, stdout);
 857	return 0;
 858}
 859
 860int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
 861					    union perf_event *event)
 862{
 863	if (dump_trace)
 864		perf_event__fprintf_aux_output_hw_id(event, stdout);
 865	return 0;
 866}
 867
 868int machine__process_switch_event(struct machine *machine __maybe_unused,
 869				  union perf_event *event)
 870{
 871	if (dump_trace)
 872		perf_event__fprintf_switch(event, stdout);
 873	return 0;
 874}
 875
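/*
 * Handle a PERF_RECORD_KSYMBOL registration: find or create a kernel map
 * covering the symbol's address range and insert the new symbol into the
 * map's DSO.
 */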
 876static int machine__process_ksymbol_register(struct machine *machine,
 877					     union perf_event *event,
 878					     struct perf_sample *sample __maybe_unused)
 879{
 880	struct symbol *sym;
 881	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
 882
 883	if (!map) {
 884		struct dso *dso = dso__new(event->ksymbol.name);
 885
 886		if (dso) {
 887			dso->kernel = DSO_SPACE__KERNEL;
 888			map = map__new2(0, dso);
 889			dso__put(dso);
 890		}
 891
 892		if (!dso || !map) {
 893			return -ENOMEM;
 894		}
 895
 896		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
 897			map->dso->binary_type = DSO_BINARY_TYPE__OOL;
 898			map->dso->data.file_size = event->ksymbol.len;
 899			dso__set_loaded(map->dso);
 900		}
 901
 902		map->start = event->ksymbol.addr;
 903		map->end = map->start + event->ksymbol.len;
 904		maps__insert(machine__kernel_maps(machine), map);
 905		map__put(map);
 906		dso__set_loaded(dso);
 907
 908		if (is_bpf_image(event->ksymbol.name)) {
 909			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
 910			dso__set_long_name(dso, "", false);
 911		}
 912	}
 913
 914	sym = symbol__new(map->map_ip(map, map->start),
 915			  event->ksymbol.len,
 916			  0, 0, event->ksymbol.name);
 917	if (!sym)
 918		return -ENOMEM;
 919	dso__insert_symbol(map->dso, sym);
 920	return 0;
 921}
 922
 923static int machine__process_ksymbol_unregister(struct machine *machine,
 924					       union perf_event *event,
 925					       struct perf_sample *sample __maybe_unused)
 926{
 927	struct symbol *sym;
 928	struct map *map;
 929
 930	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
 931	if (!map)
 932		return 0;
 933
 934	if (map != machine->vmlinux_map)
 935		maps__remove(machine__kernel_maps(machine), map);
 936	else {
 937		sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
 938		if (sym)
 939			dso__delete_symbol(map->dso, sym);
 940	}
 941
 942	return 0;
 943}
 944
 945int machine__process_ksymbol(struct machine *machine __maybe_unused,
 946			     union perf_event *event,
 947			     struct perf_sample *sample)
 948{
 949	if (dump_trace)
 950		perf_event__fprintf_ksymbol(event, stdout);
 951
 952	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
 953		return machine__process_ksymbol_unregister(machine, event,
 954							   sample);
 955	return machine__process_ksymbol_register(machine, event, sample);
 956}
 957
 958int machine__process_text_poke(struct machine *machine, union perf_event *event,
 959			       struct perf_sample *sample __maybe_unused)
 960{
 961	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
 962	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 963
 964	if (dump_trace)
 965		perf_event__fprintf_text_poke(event, machine, stdout);
 966
 967	if (!event->text_poke.new_len)
 968		return 0;
 969
 970	if (cpumode != PERF_RECORD_MISC_KERNEL) {
 971		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
 972		return 0;
 973	}
 974
 975	if (map && map->dso) {
 976		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
 977		int ret;
 978
 979		/*
 980		 * Kernel maps might be changed when loading symbols so loading
 981		 * must be done prior to using kernel maps.
 982		 */
 983		map__load(map);
 984		ret = dso__data_write_cache_addr(map->dso, map, machine,
 985						 event->text_poke.addr,
 986						 new_bytes,
 987						 event->text_poke.new_len);
 988		if (ret != event->text_poke.new_len)
 989			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
 990				 event->text_poke.addr);
 991	} else {
 992		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
 993			 event->text_poke.addr);
 994	}
 995
 996	return 0;
 997}
 998
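/*
 * Create a map for the kernel module whose name is parsed from 'filename',
 * starting at 'start', and insert it into the machine's kernel maps. Returns
 * the new map (a reference is kept by the maps tree) or NULL on failure.
 */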
 999static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
1000					      const char *filename)
1001{
1002	struct map *map = NULL;
1003	struct kmod_path m;
1004	struct dso *dso;
1005
1006	if (kmod_path__parse_name(&m, filename))
1007		return NULL;
1008
1009	dso = machine__findnew_module_dso(machine, &m, filename);
1010	if (dso == NULL)
1011		goto out;
1012
1013	map = map__new2(start, dso);
1014	if (map == NULL)
1015		goto out;
1016
1017	maps__insert(machine__kernel_maps(machine), map);
1018
1019	/* Put the map here because maps__insert already got it */
1020	map__put(map);
1021out:
1022	/* Put the dso here, corresponding to machine__findnew_module_dso */
1023	dso__put(dso);
1024	zfree(&m.name);
1025	return map;
1026}
1027
1028size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
1029{
1030	struct rb_node *nd;
1031	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
1032
1033	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1034		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1035		ret += __dsos__fprintf(&pos->dsos.head, fp);
1036	}
1037
1038	return ret;
1039}
1040
1041size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
1042				     bool (skip)(struct dso *dso, int parm), int parm)
1043{
1044	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
1045}
1046
1047size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
1048				     bool (skip)(struct dso *dso, int parm), int parm)
1049{
1050	struct rb_node *nd;
1051	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
1052
1053	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1054		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1055		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
1056	}
1057	return ret;
1058}
1059
1060size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1061{
1062	int i;
1063	size_t printed = 0;
1064	struct dso *kdso = machine__kernel_dso(machine);
1065
1066	if (kdso->has_build_id) {
1067		char filename[PATH_MAX];
1068		if (dso__build_id_filename(kdso, filename, sizeof(filename),
1069					   false))
1070			printed += fprintf(fp, "[0] %s\n", filename);
1071	}
1072
1073	for (i = 0; i < vmlinux_path__nr_entries; ++i)
1074		printed += fprintf(fp, "[%d] %s\n",
1075				   i + kdso->has_build_id, vmlinux_path[i]);
1076
1077	return printed;
1078}
1079
1080size_t machine__fprintf(struct machine *machine, FILE *fp)
1081{
1082	struct rb_node *nd;
1083	size_t ret;
1084	int i;
1085
1086	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
1087		struct threads *threads = &machine->threads[i];
1088
1089		down_read(&threads->lock);
1090
1091		ret = fprintf(fp, "Threads: %u\n", threads->nr);
1092
1093		for (nd = rb_first_cached(&threads->entries); nd;
1094		     nd = rb_next(nd)) {
1095			struct thread *pos = rb_entry(nd, struct thread, rb_node);
1096
1097			ret += thread__fprintf(pos, fp);
1098		}
1099
1100		up_read(&threads->lock);
1101	}
1102	return ret;
1103}
1104
1105static struct dso *machine__get_kernel(struct machine *machine)
1106{
1107	const char *vmlinux_name = machine->mmap_name;
1108	struct dso *kernel;
1109
1110	if (machine__is_host(machine)) {
1111		if (symbol_conf.vmlinux_name)
1112			vmlinux_name = symbol_conf.vmlinux_name;
1113
1114		kernel = machine__findnew_kernel(machine, vmlinux_name,
1115						 "[kernel]", DSO_SPACE__KERNEL);
1116	} else {
1117		if (symbol_conf.default_guest_vmlinux_name)
1118			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1119
1120		kernel = machine__findnew_kernel(machine, vmlinux_name,
1121						 "[guest.kernel]",
1122						 DSO_SPACE__KERNEL_GUEST);
1123	}
1124
1125	if (kernel != NULL && (!kernel->has_build_id))
1126		dso__read_running_kernel_build_id(kernel, machine);
1127
1128	return kernel;
1129}
1130
1131void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1132				    size_t bufsz)
1133{
1134	if (machine__is_default_guest(machine))
1135		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1136	else
1137		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1138}
1139
1140const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1141
1142/* Figure out the start address of kernel map from /proc/kallsyms.
1143 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1144 * symbol_name if it's not that important.
1145 */
1146static int machine__get_running_kernel_start(struct machine *machine,
1147					     const char **symbol_name,
1148					     u64 *start, u64 *end)
1149{
1150	char filename[PATH_MAX];
1151	int i, err = -1;
1152	const char *name;
1153	u64 addr = 0;
1154
1155	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1156
1157	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1158		return 0;
1159
1160	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1161		err = kallsyms__get_function_start(filename, name, &addr);
1162		if (!err)
1163			break;
1164	}
1165
1166	if (err)
1167		return -1;
1168
1169	if (symbol_name)
1170		*symbol_name = name;
1171
1172	*start = addr;
1173
1174	err = kallsyms__get_function_start(filename, "_etext", &addr);
1175	if (!err)
1176		*end = addr;
1177
1178	return 0;
1179}
1180
1181int machine__create_extra_kernel_map(struct machine *machine,
1182				     struct dso *kernel,
1183				     struct extra_kernel_map *xm)
1184{
1185	struct kmap *kmap;
1186	struct map *map;
1187
1188	map = map__new2(xm->start, kernel);
1189	if (!map)
1190		return -1;
1191
1192	map->end   = xm->end;
1193	map->pgoff = xm->pgoff;
1194
1195	kmap = map__kmap(map);
1196
1197	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1198
1199	maps__insert(machine__kernel_maps(machine), map);
1200
1201	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1202		  kmap->name, map->start, map->end);
1203
1204	map__put(map);
1205
1206	return 0;
1207}
1208
1209static u64 find_entry_trampoline(struct dso *dso)
1210{
1211	/* Duplicates are removed so lookup all aliases */
1212	const char *syms[] = {
1213		"_entry_trampoline",
1214		"__entry_trampoline_start",
1215		"entry_SYSCALL_64_trampoline",
1216	};
1217	struct symbol *sym = dso__first_symbol(dso);
1218	unsigned int i;
1219
1220	for (; sym; sym = dso__next_symbol(sym)) {
1221		if (sym->binding != STB_GLOBAL)
1222			continue;
1223		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1224			if (!strcmp(sym->name, syms[i]))
1225				return sym->start;
1226		}
1227	}
1228
1229	return 0;
1230}
1231
1232/*
1233 * These values can be used for kernels that do not have symbols for the entry
1234 * trampolines in kallsyms.
1235 */
1236#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1237#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1238#define X86_64_ENTRY_TRAMPOLINE		0x6000
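/*
 * With these fallback values, the trampoline for CPU 'cpu' is assumed to be
 * mapped at:
 *
 *   X86_64_CPU_ENTRY_AREA_PER_CPU + cpu * X86_64_CPU_ENTRY_AREA_SIZE +
 *   X86_64_ENTRY_TRAMPOLINE
 *
 * e.g. 0xfffffe0000006000 for CPU 0, which is what
 * machine__map_x86_64_entry_trampolines() below computes.
 */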
1239
1240/* Map x86_64 PTI entry trampolines */
1241int machine__map_x86_64_entry_trampolines(struct machine *machine,
1242					  struct dso *kernel)
1243{
1244	struct maps *kmaps = machine__kernel_maps(machine);
1245	int nr_cpus_avail, cpu;
1246	bool found = false;
1247	struct map *map;
1248	u64 pgoff;
1249
1250	/*
1251	 * In the vmlinux case, pgoff is a virtual address which must now be
1252	 * mapped to a vmlinux offset.
1253	 */
1254	maps__for_each_entry(kmaps, map) {
1255		struct kmap *kmap = __map__kmap(map);
1256		struct map *dest_map;
1257
1258		if (!kmap || !is_entry_trampoline(kmap->name))
1259			continue;
1260
1261		dest_map = maps__find(kmaps, map->pgoff);
1262		if (dest_map != map)
1263			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1264		found = true;
1265	}
1266	if (found || machine->trampolines_mapped)
1267		return 0;
1268
1269	pgoff = find_entry_trampoline(kernel);
1270	if (!pgoff)
1271		return 0;
1272
1273	nr_cpus_avail = machine__nr_cpus_avail(machine);
1274
1275	/* Add a 1 page map for each CPU's entry trampoline */
1276	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1277		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1278			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1279			 X86_64_ENTRY_TRAMPOLINE;
1280		struct extra_kernel_map xm = {
1281			.start = va,
1282			.end   = va + page_size,
1283			.pgoff = pgoff,
1284		};
1285
1286		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1287
1288		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1289			return -1;
1290	}
1291
1292	machine->trampolines_mapped = nr_cpus_avail;
1293
1294	return 0;
1295}
1296
1297int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1298					     struct dso *kernel __maybe_unused)
1299{
1300	return 0;
1301}
1302
1303static int
1304__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1305{
1306	/* In case the kernel map is being renewed, destroy the previous one */
1307	machine__destroy_kernel_maps(machine);
1308
1309	machine->vmlinux_map = map__new2(0, kernel);
1310	if (machine->vmlinux_map == NULL)
1311		return -1;
1312
1313	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
1314	maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1315	return 0;
1316}
1317
1318void machine__destroy_kernel_maps(struct machine *machine)
1319{
1320	struct kmap *kmap;
1321	struct map *map = machine__kernel_map(machine);
1322
1323	if (map == NULL)
1324		return;
1325
1326	kmap = map__kmap(map);
1327	maps__remove(machine__kernel_maps(machine), map);
1328	if (kmap && kmap->ref_reloc_sym) {
1329		zfree((char **)&kmap->ref_reloc_sym->name);
1330		zfree(&kmap->ref_reloc_sym);
1331	}
1332
1333	map__zput(machine->vmlinux_map);
1334}
1335
1336int machines__create_guest_kernel_maps(struct machines *machines)
1337{
1338	int ret = 0;
1339	struct dirent **namelist = NULL;
1340	int i, items = 0;
1341	char path[PATH_MAX];
1342	pid_t pid;
1343	char *endp;
1344
1345	if (symbol_conf.default_guest_vmlinux_name ||
1346	    symbol_conf.default_guest_modules ||
1347	    symbol_conf.default_guest_kallsyms) {
1348		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1349	}
1350
1351	if (symbol_conf.guestmount) {
1352		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1353		if (items <= 0)
1354			return -ENOENT;
1355		for (i = 0; i < items; i++) {
1356			if (!isdigit(namelist[i]->d_name[0])) {
1357				/* Filter out . and .. */
1358				continue;
1359			}
1360			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1361			if ((*endp != '\0') ||
1362			    (endp == namelist[i]->d_name) ||
1363			    (errno == ERANGE)) {
1364				pr_debug("invalid directory (%s). Skipping.\n",
1365					 namelist[i]->d_name);
1366				continue;
1367			}
1368			sprintf(path, "%s/%s/proc/kallsyms",
1369				symbol_conf.guestmount,
1370				namelist[i]->d_name);
1371			ret = access(path, R_OK);
1372			if (ret) {
1373				pr_debug("Can't access file %s\n", path);
1374				goto failure;
1375			}
1376			machines__create_kernel_maps(machines, pid);
1377		}
1378failure:
1379		free(namelist);
1380	}
1381
1382	return ret;
1383}
1384
1385void machines__destroy_kernel_maps(struct machines *machines)
1386{
1387	struct rb_node *next = rb_first_cached(&machines->guests);
1388
1389	machine__destroy_kernel_maps(&machines->host);
1390
1391	while (next) {
1392		struct machine *pos = rb_entry(next, struct machine, rb_node);
1393
1394		next = rb_next(&pos->rb_node);
1395		rb_erase_cached(&pos->rb_node, &machines->guests);
1396		machine__delete(pos);
1397	}
1398}
1399
1400int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1401{
1402	struct machine *machine = machines__findnew(machines, pid);
1403
1404	if (machine == NULL)
1405		return -1;
1406
1407	return machine__create_kernel_maps(machine);
1408}
1409
1410int machine__load_kallsyms(struct machine *machine, const char *filename)
1411{
1412	struct map *map = machine__kernel_map(machine);
1413	int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1414
1415	if (ret > 0) {
1416		dso__set_loaded(map->dso);
1417		/*
1418		 * Since /proc/kallsyms will have multiple sections for the
1419		 * kernel, with modules between them, fixup the end of all
1420		 * sections.
1421		 */
1422		maps__fixup_end(machine__kernel_maps(machine));
1423	}
1424
1425	return ret;
1426}
1427
1428int machine__load_vmlinux_path(struct machine *machine)
1429{
1430	struct map *map = machine__kernel_map(machine);
1431	int ret = dso__load_vmlinux_path(map->dso, map);
1432
1433	if (ret > 0)
1434		dso__set_loaded(map->dso);
1435
1436	return ret;
1437}
1438
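/*
 * Read <root_dir>/proc/version and return a strdup()ed copy of the kernel
 * release (the word following "Linux version "), or NULL on failure.
 */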
1439static char *get_kernel_version(const char *root_dir)
1440{
1441	char version[PATH_MAX];
1442	FILE *file;
1443	char *name, *tmp;
1444	const char *prefix = "Linux version ";
1445
1446	sprintf(version, "%s/proc/version", root_dir);
1447	file = fopen(version, "r");
1448	if (!file)
1449		return NULL;
1450
1451	tmp = fgets(version, sizeof(version), file);
1452	fclose(file);
1453	if (!tmp)
1454		return NULL;
1455
1456	name = strstr(version, prefix);
1457	if (!name)
1458		return NULL;
1459	name += strlen(prefix);
1460	tmp = strchr(name, ' ');
1461	if (tmp)
1462		*tmp = '\0';
1463
1464	return strdup(name);
1465}
1466
1467static bool is_kmod_dso(struct dso *dso)
1468{
1469	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1470	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1471}
1472
1473static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1474{
1475	char *long_name;
1476	struct map *map = maps__find_by_name(maps, m->name);
1477
1478	if (map == NULL)
1479		return 0;
1480
1481	long_name = strdup(path);
1482	if (long_name == NULL)
1483		return -ENOMEM;
1484
1485	dso__set_long_name(map->dso, long_name, true);
1486	dso__kernel_module_get_build_id(map->dso, "");
1487
1488	/*
1489	 * The full name could reveal kmod compression, so
1490	 * update the symtab_type if needed.
1491	 */
1492	if (m->comp && is_kmod_dso(map->dso)) {
1493		map->dso->symtab_type++;
1494		map->dso->comp = m->comp;
1495	}
1496
1497	return 0;
1498}
1499
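/*
 * Recursively walk a /lib/modules/<version> directory and, for each module
 * file found, record its full path in the matching module map's DSO.
 */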
1500static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1501{
1502	struct dirent *dent;
1503	DIR *dir = opendir(dir_name);
1504	int ret = 0;
1505
1506	if (!dir) {
1507		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1508		return -1;
1509	}
1510
1511	while ((dent = readdir(dir)) != NULL) {
1512		char path[PATH_MAX];
1513		struct stat st;
1514
1515		/* sshfs might return bad dent->d_type, so we have to stat */
1516		path__join(path, sizeof(path), dir_name, dent->d_name);
1517		if (stat(path, &st))
1518			continue;
1519
1520		if (S_ISDIR(st.st_mode)) {
1521			if (!strcmp(dent->d_name, ".") ||
1522			    !strcmp(dent->d_name, ".."))
1523				continue;
1524
1525			/* Do not follow top-level source and build symlinks */
1526			if (depth == 0) {
1527				if (!strcmp(dent->d_name, "source") ||
1528				    !strcmp(dent->d_name, "build"))
1529					continue;
1530			}
1531
1532			ret = maps__set_modules_path_dir(maps, path, depth + 1);
1533			if (ret < 0)
1534				goto out;
1535		} else {
1536			struct kmod_path m;
1537
1538			ret = kmod_path__parse_name(&m, dent->d_name);
1539			if (ret)
1540				goto out;
1541
1542			if (m.kmod)
1543				ret = maps__set_module_path(maps, path, &m);
1544
1545			zfree(&m.name);
1546
1547			if (ret)
1548				goto out;
1549		}
1550	}
1551
1552out:
1553	closedir(dir);
1554	return ret;
1555}
1556
1557static int machine__set_modules_path(struct machine *machine)
1558{
1559	char *version;
1560	char modules_path[PATH_MAX];
1561
1562	version = get_kernel_version(machine->root_dir);
1563	if (!version)
1564		return -1;
1565
1566	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1567		 machine->root_dir, version);
1568	free(version);
1569
1570	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1571}
1572int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1573				u64 *size __maybe_unused,
1574				const char *name __maybe_unused)
1575{
1576	return 0;
1577}
1578
1579static int machine__create_module(void *arg, const char *name, u64 start,
1580				  u64 size)
1581{
1582	struct machine *machine = arg;
1583	struct map *map;
1584
1585	if (arch__fix_module_text_start(&start, &size, name) < 0)
1586		return -1;
1587
1588	map = machine__addnew_module_map(machine, start, name);
1589	if (map == NULL)
1590		return -1;
1591	map->end = start + size;
1592
1593	dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1594
1595	return 0;
1596}
1597
1598static int machine__create_modules(struct machine *machine)
1599{
1600	const char *modules;
1601	char path[PATH_MAX];
1602
1603	if (machine__is_default_guest(machine)) {
1604		modules = symbol_conf.default_guest_modules;
1605	} else {
1606		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1607		modules = path;
1608	}
1609
1610	if (symbol__restricted_filename(modules, "/proc/modules"))
1611		return -1;
1612
1613	if (modules__parse(modules, machine, machine__create_module))
1614		return -1;
1615
1616	if (!machine__set_modules_path(machine))
1617		return 0;
1618
1619	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1620
1621	return 0;
1622}
1623
1624static void machine__set_kernel_mmap(struct machine *machine,
1625				     u64 start, u64 end)
1626{
1627	machine->vmlinux_map->start = start;
1628	machine->vmlinux_map->end   = end;
1629	/*
1630	 * Be a bit paranoid here: some perf.data files came with
1631	 * a zero-sized synthesized MMAP event for the kernel.
1632	 */
1633	if (start == 0 && end == 0)
1634		machine->vmlinux_map->end = ~0ULL;
1635}
1636
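/*
 * Update the address range of the kernel map. The map is removed from and
 * re-inserted into the kernel maps so that the maps tree, which is ordered
 * by address, stays consistent.
 */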
1637static void machine__update_kernel_mmap(struct machine *machine,
1638				     u64 start, u64 end)
1639{
1640	struct map *map = machine__kernel_map(machine);
1641
1642	map__get(map);
1643	maps__remove(machine__kernel_maps(machine), map);
1644
1645	machine__set_kernel_mmap(machine, start, end);
1646
1647	maps__insert(machine__kernel_maps(machine), map);
1648	map__put(map);
1649}
1650
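/*
 * Create the vmlinux map, the module maps and any arch-specific extra kernel
 * maps, then fix up the kernel map's start/end using kallsyms and, if the end
 * is still unknown, the start of the adjacent module map.
 */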
1651int machine__create_kernel_maps(struct machine *machine)
1652{
1653	struct dso *kernel = machine__get_kernel(machine);
1654	const char *name = NULL;
1655	struct map *map;
1656	u64 start = 0, end = ~0ULL;
1657	int ret;
1658
1659	if (kernel == NULL)
1660		return -1;
1661
1662	ret = __machine__create_kernel_maps(machine, kernel);
1663	if (ret < 0)
1664		goto out_put;
1665
1666	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1667		if (machine__is_host(machine))
1668			pr_debug("Problems creating module maps, "
1669				 "continuing anyway...\n");
1670		else
1671			pr_debug("Problems creating module maps for guest %d, "
1672				 "continuing anyway...\n", machine->pid);
1673	}
1674
1675	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1676		if (name &&
1677		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1678			machine__destroy_kernel_maps(machine);
1679			ret = -1;
1680			goto out_put;
1681		}
1682
1683		/*
1684		 * We have a real start address now, so re-order the kmaps;
1685		 * assume it's the last in the kmaps.
1686		 */
1687		machine__update_kernel_mmap(machine, start, end);
1688	}
1689
1690	if (machine__create_extra_kernel_maps(machine, kernel))
1691		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1692
1693	if (end == ~0ULL) {
1694		/* update end address of the kernel map using adjacent module address */
1695		map = map__next(machine__kernel_map(machine));
1696		if (map)
1697			machine__set_kernel_mmap(machine, start, map->start);
1698	}
1699
1700out_put:
1701	dso__put(kernel);
1702	return ret;
1703}
1704
1705static bool machine__uses_kcore(struct machine *machine)
1706{
1707	struct dso *dso;
1708
1709	list_for_each_entry(dso, &machine->dsos.head, node) {
1710		if (dso__is_kcore(dso))
1711			return true;
1712	}
1713
1714	return false;
1715}
1716
1717static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1718					     struct extra_kernel_map *xm)
1719{
1720	return machine__is(machine, "x86_64") &&
1721	       is_entry_trampoline(xm->name);
1722}
1723
1724static int machine__process_extra_kernel_map(struct machine *machine,
1725					     struct extra_kernel_map *xm)
1726{
1727	struct dso *kernel = machine__kernel_dso(machine);
1728
1729	if (kernel == NULL)
1730		return -1;
1731
1732	return machine__create_extra_kernel_map(machine, kernel, xm);
1733}
1734
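/*
 * Handle an MMAP/MMAP2 event that lies in kernel space (unless kcore maps are
 * already in use, in which case it is ignored): module mmaps get a new module
 * map, the main kernel mmap creates or updates the kernel map and its ref
 * reloc symbol, and x86_64 entry trampoline mmaps become extra kernel maps.
 */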
1735static int machine__process_kernel_mmap_event(struct machine *machine,
1736					      struct extra_kernel_map *xm,
1737					      struct build_id *bid)
1738{
1739	struct map *map;
1740	enum dso_space_type dso_space;
1741	bool is_kernel_mmap;
1742	const char *mmap_name = machine->mmap_name;
1743
1744	/* If we have maps from kcore then we do not need or want any others */
1745	if (machine__uses_kcore(machine))
1746		return 0;
1747
1748	if (machine__is_host(machine))
1749		dso_space = DSO_SPACE__KERNEL;
1750	else
1751		dso_space = DSO_SPACE__KERNEL_GUEST;
1752
1753	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1754	if (!is_kernel_mmap && !machine__is_host(machine)) {
1755		/*
1756		 * If the event was recorded inside the guest and injected into
1757		 * the host perf.data file, then it will match a host mmap_name,
1758		 * so try that - see machine__set_mmap_name().
1759		 */
1760		mmap_name = "[kernel.kallsyms]";
1761		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1762	}
1763	if (xm->name[0] == '/' ||
1764	    (!is_kernel_mmap && xm->name[0] == '[')) {
1765		map = machine__addnew_module_map(machine, xm->start,
1766						 xm->name);
1767		if (map == NULL)
1768			goto out_problem;
1769
1770		map->end = map->start + xm->end - xm->start;
1771
1772		if (build_id__is_defined(bid))
1773			dso__set_build_id(map->dso, bid);
1774
1775	} else if (is_kernel_mmap) {
1776		const char *symbol_name = xm->name + strlen(mmap_name);
1777		/*
1778		 * Should be there already, from the build-id table in
1779		 * the header.
1780		 */
1781		struct dso *kernel = NULL;
1782		struct dso *dso;
1783
1784		down_read(&machine->dsos.lock);
1785
1786		list_for_each_entry(dso, &machine->dsos.head, node) {
1787
1788			/*
1789			 * The cpumode passed to is_kernel_module is not the
1790			 * cpumode of *this* event. If we insist on passing
1791			 * correct cpumode to is_kernel_module, we should
1792			 * record the cpumode when we add this dso to the
1793			 * linked list.
1794			 *
1795			 * However, we don't really need to pass the correct
1796			 * cpumode.  We know the correct cpumode must be kernel
1797			 * mode (if not, we should not link it onto kernel_dsos
1798			 * list).
1799			 *
1800			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1801			 * is_kernel_module() treats it as a kernel cpumode.
1802			 */
1803
1804			if (!dso->kernel ||
1805			    is_kernel_module(dso->long_name,
1806					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1807				continue;
1808
1809
1810			kernel = dso;
1811			break;
1812		}
1813
1814		up_read(&machine->dsos.lock);
1815
1816		if (kernel == NULL)
1817			kernel = machine__findnew_dso(machine, machine->mmap_name);
1818		if (kernel == NULL)
1819			goto out_problem;
1820
1821		kernel->kernel = dso_space;
1822		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1823			dso__put(kernel);
1824			goto out_problem;
1825		}
1826
1827		if (strstr(kernel->long_name, "vmlinux"))
1828			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1829
1830		machine__update_kernel_mmap(machine, xm->start, xm->end);
1831
1832		if (build_id__is_defined(bid))
1833			dso__set_build_id(kernel, bid);
1834
1835		/*
1836		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1837		 * symbol. Effectively having zero here means that at record
1838		 * time /proc/sys/kernel/kptr_restrict was non-zero.
1839		 */
1840		if (xm->pgoff != 0) {
1841			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1842							symbol_name,
1843							xm->pgoff);
1844		}
1845
1846		if (machine__is_default_guest(machine)) {
1847			/*
1848			 * preload dso of guest kernel and modules
1849			 */
1850			dso__load(kernel, machine__kernel_map(machine));
1851		}
1852	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1853		return machine__process_extra_kernel_map(machine, xm);
1854	}
1855	return 0;
1856out_problem:
1857	return -1;
1858}
1859
1860int machine__process_mmap2_event(struct machine *machine,
1861				 union perf_event *event,
1862				 struct perf_sample *sample)
1863{
1864	struct thread *thread;
1865	struct map *map;
1866	struct dso_id dso_id = {
1867		.maj = event->mmap2.maj,
1868		.min = event->mmap2.min,
1869		.ino = event->mmap2.ino,
1870		.ino_generation = event->mmap2.ino_generation,
1871	};
1872	struct build_id __bid, *bid = NULL;
1873	int ret = 0;
1874
1875	if (dump_trace)
1876		perf_event__fprintf_mmap2(event, stdout);
1877
1878	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1879		bid = &__bid;
1880		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1881	}
1882
1883	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1884	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1885		struct extra_kernel_map xm = {
1886			.start = event->mmap2.start,
1887			.end   = event->mmap2.start + event->mmap2.len,
1888			.pgoff = event->mmap2.pgoff,
1889		};
1890
1891		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1892		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1893		if (ret < 0)
1894			goto out_problem;
1895		return 0;
1896	}
1897
1898	thread = machine__findnew_thread(machine, event->mmap2.pid,
1899					event->mmap2.tid);
1900	if (thread == NULL)
1901		goto out_problem;
1902
1903	map = map__new(machine, event->mmap2.start,
1904			event->mmap2.len, event->mmap2.pgoff,
1905			&dso_id, event->mmap2.prot,
1906			event->mmap2.flags, bid,
1907			event->mmap2.filename, thread);
1908
1909	if (map == NULL)
1910		goto out_problem_map;
1911
1912	ret = thread__insert_map(thread, map);
1913	if (ret)
1914		goto out_problem_insert;
1915
1916	thread__put(thread);
1917	map__put(map);
1918	return 0;
1919
1920out_problem_insert:
1921	map__put(map);
1922out_problem_map:
1923	thread__put(thread);
1924out_problem:
1925	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1926	return 0;
1927}
1928
1929int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1930				struct perf_sample *sample)
1931{
1932	struct thread *thread;
1933	struct map *map;
1934	u32 prot = 0;
1935	int ret = 0;
1936
1937	if (dump_trace)
1938		perf_event__fprintf_mmap(event, stdout);
1939
1940	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1941	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1942		struct extra_kernel_map xm = {
1943			.start = event->mmap.start,
1944			.end   = event->mmap.start + event->mmap.len,
1945			.pgoff = event->mmap.pgoff,
1946		};
1947
1948		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1949		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1950		if (ret < 0)
1951			goto out_problem;
1952		return 0;
1953	}
1954
1955	thread = machine__findnew_thread(machine, event->mmap.pid,
1956					 event->mmap.tid);
1957	if (thread == NULL)
1958		goto out_problem;
1959
1960	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1961		prot = PROT_EXEC;
1962
1963	map = map__new(machine, event->mmap.start,
1964			event->mmap.len, event->mmap.pgoff,
1965			NULL, prot, 0, NULL, event->mmap.filename, thread);
1966
1967	if (map == NULL)
1968		goto out_problem_map;
1969
1970	ret = thread__insert_map(thread, map);
1971	if (ret)
1972		goto out_problem_insert;
1973
1974	thread__put(thread);
1975	map__put(map);
1976	return 0;
1977
1978out_problem_insert:
1979	map__put(map);
1980out_problem_map:
1981	thread__put(thread);
1982out_problem:
1983	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1984	return 0;
1985}
1986
1987static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1988{
1989	struct threads *threads = machine__threads(machine, th->tid);
1990
1991	if (threads->last_match == th)
1992		threads__set_last_match(threads, NULL);
1993
1994	if (lock)
1995		down_write(&threads->lock);
1996
1997	BUG_ON(refcount_read(&th->refcnt) == 0);
1998
1999	rb_erase_cached(&th->rb_node, &threads->entries);
2000	RB_CLEAR_NODE(&th->rb_node);
2001	--threads->nr;
2002	/*
2003	 * Move it first to the dead_threads list, then drop the reference;
2004	 * if this is the last reference, the thread__delete destructor
2005	 * will be called and we will remove it from the dead_threads list.
2006	 */
2007	list_add_tail(&th->node, &threads->dead);
2008
2009	/*
2010	 * We need to do the put here because if this is the last refcount,
2011	 * then we will be touching the threads->dead head when removing the
2012	 * thread.
2013	 */
2014	thread__put(th);
2015
2016	if (lock)
2017		up_write(&threads->lock);
2018}
2019
2020void machine__remove_thread(struct machine *machine, struct thread *th)
2021{
2022	return __machine__remove_thread(machine, th, true);
2023}
2024
2025int machine__process_fork_event(struct machine *machine, union perf_event *event,
2026				struct perf_sample *sample)
2027{
2028	struct thread *thread = machine__find_thread(machine,
2029						     event->fork.pid,
2030						     event->fork.tid);
2031	struct thread *parent = machine__findnew_thread(machine,
2032							event->fork.ppid,
2033							event->fork.ptid);
2034	bool do_maps_clone = true;
2035	int err = 0;
2036
2037	if (dump_trace)
2038		perf_event__fprintf_task(event, stdout);
2039
2040	/*
2041	 * There may be an existing thread that is not actually the parent,
2042	 * either because we are processing events out of order, or because the
2043	 * (fork) event that would have removed the thread was lost. Assume the
2044	 * latter case and continue on as best we can.
2045	 */
2046	if (parent->pid_ != (pid_t)event->fork.ppid) {
2047		dump_printf("removing erroneous parent thread %d/%d\n",
2048			    parent->pid_, parent->tid);
2049		machine__remove_thread(machine, parent);
2050		thread__put(parent);
2051		parent = machine__findnew_thread(machine, event->fork.ppid,
2052						 event->fork.ptid);
2053	}
2054
2055	/* If a thread currently exists for the thread id, remove it */
2056	if (thread != NULL) {
2057		machine__remove_thread(machine, thread);
2058		thread__put(thread);
2059	}
2060
2061	thread = machine__findnew_thread(machine, event->fork.pid,
2062					 event->fork.tid);
2063	/*
2064	 * When synthesizing FORK events, we are trying to create thread
2065	 * objects for the already running tasks on the machine.
2066	 *
2067	 * Normally, for a kernel FORK event, we want to clone the parent's
2068	 * maps because that is what the kernel just did.
2069	 *
2070	 * But when synthesizing, this should not be done.  If we do, we end up
2071	 * with overlapping maps as we process the synthesized MMAP2 events that
2072	 * get delivered shortly thereafter.
2073	 *
2074	 * Use the FORK event misc flags in an internal way to signal this
2075	 * situation, so we can elide the map clone when appropriate.
2076	 */
2077	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
2078		do_maps_clone = false;
2079
2080	if (thread == NULL || parent == NULL ||
2081	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2082		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
2083		err = -1;
2084	}
2085	thread__put(thread);
2086	thread__put(parent);
2087
2088	return err;
2089}
2090
2091int machine__process_exit_event(struct machine *machine, union perf_event *event,
2092				struct perf_sample *sample __maybe_unused)
2093{
2094	struct thread *thread = machine__find_thread(machine,
2095						     event->fork.pid,
2096						     event->fork.tid);
2097
2098	if (dump_trace)
2099		perf_event__fprintf_task(event, stdout);
2100
2101	if (thread != NULL) {
2102		thread__exited(thread);
2103		thread__put(thread);
2104	}
2105
2106	return 0;
2107}
2108
2109int machine__process_event(struct machine *machine, union perf_event *event,
2110			   struct perf_sample *sample)
2111{
2112	int ret;
2113
2114	switch (event->header.type) {
2115	case PERF_RECORD_COMM:
2116		ret = machine__process_comm_event(machine, event, sample); break;
2117	case PERF_RECORD_MMAP:
2118		ret = machine__process_mmap_event(machine, event, sample); break;
2119	case PERF_RECORD_NAMESPACES:
2120		ret = machine__process_namespaces_event(machine, event, sample); break;
2121	case PERF_RECORD_CGROUP:
2122		ret = machine__process_cgroup_event(machine, event, sample); break;
2123	case PERF_RECORD_MMAP2:
2124		ret = machine__process_mmap2_event(machine, event, sample); break;
2125	case PERF_RECORD_FORK:
2126		ret = machine__process_fork_event(machine, event, sample); break;
2127	case PERF_RECORD_EXIT:
2128		ret = machine__process_exit_event(machine, event, sample); break;
2129	case PERF_RECORD_LOST:
2130		ret = machine__process_lost_event(machine, event, sample); break;
2131	case PERF_RECORD_AUX:
2132		ret = machine__process_aux_event(machine, event); break;
2133	case PERF_RECORD_ITRACE_START:
2134		ret = machine__process_itrace_start_event(machine, event); break;
2135	case PERF_RECORD_LOST_SAMPLES:
2136		ret = machine__process_lost_samples_event(machine, event, sample); break;
2137	case PERF_RECORD_SWITCH:
2138	case PERF_RECORD_SWITCH_CPU_WIDE:
2139		ret = machine__process_switch_event(machine, event); break;
2140	case PERF_RECORD_KSYMBOL:
2141		ret = machine__process_ksymbol(machine, event, sample); break;
2142	case PERF_RECORD_BPF_EVENT:
2143		ret = machine__process_bpf(machine, event, sample); break;
2144	case PERF_RECORD_TEXT_POKE:
2145		ret = machine__process_text_poke(machine, event, sample); break;
2146	case PERF_RECORD_AUX_OUTPUT_HW_ID:
2147		ret = machine__process_aux_output_hw_id_event(machine, event); break;
2148	default:
2149		ret = -1;
2150		break;
2151	}
2152
2153	return ret;
2154}
2155
2156static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2157{
2158	if (!regexec(regex, sym->name, 0, NULL, 0))
2159		return true;
2160	return false;
2161}
2162
2163static void ip__resolve_ams(struct thread *thread,
2164			    struct addr_map_symbol *ams,
2165			    u64 ip)
2166{
2167	struct addr_location al;
2168
2169	memset(&al, 0, sizeof(al));
2170	/*
2171	 * We cannot use the header.misc hint to determine whether a
2172	 * branch stack address is user, kernel, guest, hypervisor.
2173	 * Branches may straddle the kernel/user/hypervisor boundaries.
2174	 * Thus, we have to try consecutively until we find a match;
2175	 * otherwise, the symbol is unknown.
2176	 */
2177	thread__find_cpumode_addr_location(thread, ip, &al);
2178
2179	ams->addr = ip;
2180	ams->al_addr = al.addr;
2181	ams->al_level = al.level;
2182	ams->ms.maps = al.maps;
2183	ams->ms.sym = al.sym;
2184	ams->ms.map = al.map;
2185	ams->phys_addr = 0;
2186	ams->data_page_size = 0;
2187}
2188
2189static void ip__resolve_data(struct thread *thread,
2190			     u8 m, struct addr_map_symbol *ams,
2191			     u64 addr, u64 phys_addr, u64 daddr_page_size)
2192{
2193	struct addr_location al;
2194
2195	memset(&al, 0, sizeof(al));
2196
2197	thread__find_symbol(thread, m, addr, &al);
2198
2199	ams->addr = addr;
2200	ams->al_addr = al.addr;
2201	ams->al_level = al.level;
2202	ams->ms.maps = al.maps;
2203	ams->ms.sym = al.sym;
2204	ams->ms.map = al.map;
2205	ams->phys_addr = phys_addr;
2206	ams->data_page_size = daddr_page_size;
2207}
2208
2209struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2210				     struct addr_location *al)
2211{
2212	struct mem_info *mi = mem_info__new();
2213
2214	if (!mi)
2215		return NULL;
2216
2217	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2218	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2219			 sample->addr, sample->phys_addr,
2220			 sample->data_page_size);
2221	mi->data_src.val = sample->data_src;
2222
2223	return mi;
2224}
2225
2226static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2227{
2228	struct map *map = ms->map;
2229	char *srcline = NULL;
2230
2231	if (!map || callchain_param.key == CCKEY_FUNCTION)
2232		return srcline;
2233
2234	srcline = srcline__tree_find(&map->dso->srclines, ip);
2235	if (!srcline) {
2236		bool show_sym = false;
2237		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2238
2239		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2240				      ms->sym, show_sym, show_addr, ip);
2241		srcline__tree_insert(&map->dso->srclines, ip, srcline);
2242	}
2243
2244	return srcline;
2245}
2246
2247struct iterations {
2248	int nr_loop_iter;
2249	u64 cycles;
2250};
2251
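/*
 * Resolve one callchain entry to a map/symbol and append it to the cursor.
 * PERF_CONTEXT_* marker entries only switch the current cpumode and are not
 * appended; an unknown marker resets the cursor and returns non-zero.
 */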
2252static int add_callchain_ip(struct thread *thread,
2253			    struct callchain_cursor *cursor,
2254			    struct symbol **parent,
2255			    struct addr_location *root_al,
2256			    u8 *cpumode,
2257			    u64 ip,
2258			    bool branch,
2259			    struct branch_flags *flags,
2260			    struct iterations *iter,
2261			    u64 branch_from)
2262{
2263	struct map_symbol ms;
2264	struct addr_location al;
2265	int nr_loop_iter = 0;
2266	u64 iter_cycles = 0;
2267	const char *srcline = NULL;
2268
2269	al.filtered = 0;
2270	al.sym = NULL;
2271	al.srcline = NULL;
2272	if (!cpumode) {
2273		thread__find_cpumode_addr_location(thread, ip, &al);
2274	} else {
2275		if (ip >= PERF_CONTEXT_MAX) {
2276			switch (ip) {
2277			case PERF_CONTEXT_HV:
2278				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2279				break;
2280			case PERF_CONTEXT_KERNEL:
2281				*cpumode = PERF_RECORD_MISC_KERNEL;
2282				break;
2283			case PERF_CONTEXT_USER:
2284				*cpumode = PERF_RECORD_MISC_USER;
2285				break;
2286			default:
2287				pr_debug("invalid callchain context: "
2288					 "%"PRId64"\n", (s64) ip);
2289				/*
2290				 * It seems the callchain is corrupted.
2291				 * Discard all.
2292				 */
2293				callchain_cursor_reset(cursor);
2294				return 1;
2295			}
2296			return 0;
2297		}
2298		thread__find_symbol(thread, *cpumode, ip, &al);
2299	}
2300
2301	if (al.sym != NULL) {
2302		if (perf_hpp_list.parent && !*parent &&
2303		    symbol__match_regex(al.sym, &parent_regex))
2304			*parent = al.sym;
2305		else if (have_ignore_callees && root_al &&
2306		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2307			/* Treat this symbol as the root,
2308			   forgetting its callees. */
2309			*root_al = al;
2310			callchain_cursor_reset(cursor);
2311		}
2312	}
2313
2314	if (symbol_conf.hide_unresolved && al.sym == NULL)
2315		return 0;
2316
2317	if (iter) {
2318		nr_loop_iter = iter->nr_loop_iter;
2319		iter_cycles = iter->cycles;
2320	}
2321
2322	ms.maps = al.maps;
2323	ms.map = al.map;
2324	ms.sym = al.sym;
2325	srcline = callchain_srcline(&ms, al.addr);
2326	return callchain_cursor_append(cursor, ip, &ms,
2327				       branch, flags, nr_loop_iter,
2328				       iter_cycles, branch_from, srcline);
2329}
2330
2331struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2332					   struct addr_location *al)
2333{
2334	unsigned int i;
2335	const struct branch_stack *bs = sample->branch_stack;
2336	struct branch_entry *entries = perf_sample__branch_entries(sample);
2337	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2338
2339	if (!bi)
2340		return NULL;
2341
2342	for (i = 0; i < bs->nr; i++) {
2343		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2344		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2345		bi[i].flags = entries[i].flags;
2346	}
2347	return bi;
2348}
2349
2350static void save_iterations(struct iterations *iter,
2351			    struct branch_entry *be, int nr)
2352{
2353	int i;
2354
2355	iter->nr_loop_iter++;
2356	iter->cycles = 0;
2357
2358	for (i = 0; i < nr; i++)
2359		iter->cycles += be[i].flags.cycles;
2360}
2361
2362#define CHASHSZ 127
2363#define CHASHBITS 7
2364#define NO_ENTRY 0xff
2365
2366#define PERF_MAX_BRANCH_DEPTH 127
2367
2368/* Remove loops. */
2369static int remove_loops(struct branch_entry *l, int nr,
2370			struct iterations *iter)
2371{
2372	int i, j, off;
2373	unsigned char chash[CHASHSZ];
2374
2375	memset(chash, NO_ENTRY, sizeof(chash));
2376
2377	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2378
2379	for (i = 0; i < nr; i++) {
2380		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2381
2382		/* no collision handling for now */
2383		if (chash[h] == NO_ENTRY) {
2384			chash[h] = i;
2385		} else if (l[chash[h]].from == l[i].from) {
2386			bool is_loop = true;
2387			/* check if it is a real loop */
2388			off = 0;
2389			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2390				if (l[j].from != l[i + off].from) {
2391					is_loop = false;
2392					break;
2393				}
2394			if (is_loop) {
2395				j = nr - (i + off);
2396				if (j > 0) {
2397					save_iterations(iter + i + off,
2398						l + i, off);
2399
2400					memmove(iter + i, iter + i + off,
2401						j * sizeof(*iter));
2402
2403					memmove(l + i, l + i + off,
2404						j * sizeof(*l));
2405				}
2406
2407				nr -= off;
2408			}
2409		}
2410	}
2411	return nr;
2412}
2413
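/*
 * Add the kernel part of the callchain (chain->ips[0..end]) to the cursor,
 * top-down for callee order or bottom-up for caller order.  The LBR entries
 * themselves are added separately by lbr_callchain_add_lbr_ip().
 */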
2414static int lbr_callchain_add_kernel_ip(struct thread *thread,
2415				       struct callchain_cursor *cursor,
2416				       struct perf_sample *sample,
2417				       struct symbol **parent,
2418				       struct addr_location *root_al,
2419				       u64 branch_from,
2420				       bool callee, int end)
2421{
2422	struct ip_callchain *chain = sample->callchain;
2423	u8 cpumode = PERF_RECORD_MISC_USER;
2424	int err, i;
2425
2426	if (callee) {
2427		for (i = 0; i < end + 1; i++) {
2428			err = add_callchain_ip(thread, cursor, parent,
2429					       root_al, &cpumode, chain->ips[i],
2430					       false, NULL, NULL, branch_from);
2431			if (err)
2432				return err;
2433		}
2434		return 0;
2435	}
2436
2437	for (i = end; i >= 0; i--) {
2438		err = add_callchain_ip(thread, cursor, parent,
2439				       root_al, &cpumode, chain->ips[i],
2440				       false, NULL, NULL, branch_from);
2441		if (err)
2442			return err;
2443	}
2444
2445	return 0;
2446}
2447
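/*
 * Remember the callchain cursor node generated for LBR entry @idx in
 * lbr_stitch->prev_lbr_cursor.  If the next sample's LBR stack overlaps
 * with this one, has_stitched_lbr() copies these saved nodes onto
 * lbr_stitch->lists so they can be appended ("stitched") to the next
 * sample's truncated call stack.
 */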
2448static void save_lbr_cursor_node(struct thread *thread,
2449				 struct callchain_cursor *cursor,
2450				 int idx)
2451{
2452	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2453
2454	if (!lbr_stitch)
2455		return;
2456
2457	if (cursor->pos == cursor->nr) {
2458		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2459		return;
2460	}
2461
2462	if (!cursor->curr)
2463		cursor->curr = cursor->first;
2464	else
2465		cursor->curr = cursor->curr->next;
2466	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2467	       sizeof(struct callchain_cursor_node));
2468
2469	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2470	cursor->pos++;
2471}
2472
2473static int lbr_callchain_add_lbr_ip(struct thread *thread,
2474				    struct callchain_cursor *cursor,
2475				    struct perf_sample *sample,
2476				    struct symbol **parent,
2477				    struct addr_location *root_al,
2478				    u64 *branch_from,
2479				    bool callee)
2480{
2481	struct branch_stack *lbr_stack = sample->branch_stack;
2482	struct branch_entry *entries = perf_sample__branch_entries(sample);
2483	u8 cpumode = PERF_RECORD_MISC_USER;
2484	int lbr_nr = lbr_stack->nr;
2485	struct branch_flags *flags;
2486	int err, i;
2487	u64 ip;
2488
2489	/*
2490	 * curr and pos are not used while the writing session is open. They
2491	 * are cleared in callchain_cursor_commit() when the writing session
2492	 * is closed.  Use them here to track the current cursor node.
2493	 */
2494	if (thread->lbr_stitch) {
2495		cursor->curr = NULL;
2496		cursor->pos = cursor->nr;
2497		if (cursor->nr) {
2498			cursor->curr = cursor->first;
2499			for (i = 0; i < (int)(cursor->nr - 1); i++)
2500				cursor->curr = cursor->curr->next;
2501		}
2502	}
2503
2504	if (callee) {
2505		/* Add LBR ip from first entries.to */
2506		ip = entries[0].to;
2507		flags = &entries[0].flags;
2508		*branch_from = entries[0].from;
2509		err = add_callchain_ip(thread, cursor, parent,
2510				       root_al, &cpumode, ip,
2511				       true, flags, NULL,
2512				       *branch_from);
2513		if (err)
2514			return err;
2515
2516		/*
2517		 * The number of cursor nodes has increased, so advance the
2518		 * current cursor node.  There is no need to save the cursor
2519		 * node for entry 0, though: the whole LBR stack of the
2520		 * previous sample can never be stitched onto it.
2521		 */
2522		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2523			if (!cursor->curr)
2524				cursor->curr = cursor->first;
2525			else
2526				cursor->curr = cursor->curr->next;
2527			cursor->pos++;
2528		}
2529
2530		/* Add LBR ip from entries.from one by one. */
2531		for (i = 0; i < lbr_nr; i++) {
2532			ip = entries[i].from;
2533			flags = &entries[i].flags;
2534			err = add_callchain_ip(thread, cursor, parent,
2535					       root_al, &cpumode, ip,
2536					       true, flags, NULL,
2537					       *branch_from);
2538			if (err)
2539				return err;
2540			save_lbr_cursor_node(thread, cursor, i);
2541		}
2542		return 0;
2543	}
2544
2545	/* Add LBR ip from entries.from one by one. */
2546	for (i = lbr_nr - 1; i >= 0; i--) {
2547		ip = entries[i].from;
2548		flags = &entries[i].flags;
2549		err = add_callchain_ip(thread, cursor, parent,
2550				       root_al, &cpumode, ip,
2551				       true, flags, NULL,
2552				       *branch_from);
2553		if (err)
2554			return err;
2555		save_lbr_cursor_node(thread, cursor, i);
2556	}
2557
2558	/* Add LBR ip from first entries.to */
2559	ip = entries[0].to;
2560	flags = &entries[0].flags;
2561	*branch_from = entries[0].from;
2562	err = add_callchain_ip(thread, cursor, parent,
2563			       root_al, &cpumode, ip,
2564			       true, flags, NULL,
2565			       *branch_from);
2566	if (err)
2567		return err;
2568
2569	return 0;
2570}
2571
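/*
 * Append the cursor nodes that were saved from the previous sample
 * (collected on lbr_stitch->lists by has_stitched_lbr()) to the current
 * callchain cursor, effectively extending a truncated LBR call stack.
 */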
2572static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2573					     struct callchain_cursor *cursor)
2574{
2575	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2576	struct callchain_cursor_node *cnode;
2577	struct stitch_list *stitch_node;
2578	int err;
2579
2580	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2581		cnode = &stitch_node->cursor;
2582
2583		err = callchain_cursor_append(cursor, cnode->ip,
2584					      &cnode->ms,
2585					      cnode->branch,
2586					      &cnode->branch_flags,
2587					      cnode->nr_loop_iter,
2588					      cnode->iter_cycles,
2589					      cnode->branch_from,
2590					      cnode->srcline);
2591		if (err)
2592			return err;
2593	}
2594	return 0;
2595}
2596
2597static struct stitch_list *get_stitch_node(struct thread *thread)
2598{
2599	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2600	struct stitch_list *stitch_node;
2601
2602	if (!list_empty(&lbr_stitch->free_lists)) {
2603		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2604					       struct stitch_list, node);
2605		list_del(&stitch_node->node);
2606
2607		return stitch_node;
2608	}
2609
2610	return malloc(sizeof(struct stitch_list));
2611}
2612
2613static bool has_stitched_lbr(struct thread *thread,
2614			     struct perf_sample *cur,
2615			     struct perf_sample *prev,
2616			     unsigned int max_lbr,
2617			     bool callee)
2618{
2619	struct branch_stack *cur_stack = cur->branch_stack;
2620	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2621	struct branch_stack *prev_stack = prev->branch_stack;
2622	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2623	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2624	int i, j, nr_identical_branches = 0;
2625	struct stitch_list *stitch_node;
2626	u64 cur_base, distance;
2627
2628	if (!cur_stack || !prev_stack)
2629		return false;
2630
2631	/* Find the physical index of the base-of-stack for current sample. */
2632	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2633
2634	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2635						     (max_lbr + prev_stack->hw_idx - cur_base);
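	/*
	 * Hypothetical numbers for illustration: with max_lbr = 32, a current
	 * stack of nr = 8 entries and hw_idx = 5, cur_base is
	 * 32 - 8 + 5 + 1 = 30.  If the previous sample had hw_idx = 7, the
	 * distance wraps around to 32 + 7 - 30 = 9, so the previous stack
	 * needs at least 10 entries for any stitching to be possible.
	 */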
2636	/* Previous sample has shorter stack. Nothing can be stitched. */
2637	if (distance + 1 > prev_stack->nr)
2638		return false;
2639
2640	/*
2641	 * Check if there are identical LBRs between two samples.
2642	 * Identical LBRs must have same from, to and flags values. Also,
2643	 * they have to be saved in the same LBR registers (same physical
2644	 * index).
2645	 *
2646	 * Starts from the base-of-stack of current sample.
2647	 */
2648	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2649		if ((prev_entries[i].from != cur_entries[j].from) ||
2650		    (prev_entries[i].to != cur_entries[j].to) ||
2651		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2652			break;
2653		nr_identical_branches++;
2654	}
2655
2656	if (!nr_identical_branches)
2657		return false;
2658
2659	/*
2660	 * Save the LBRs between the base-of-stack of previous sample
2661	 * and the base-of-stack of current sample into lbr_stitch->lists.
2662	 * These LBRs will be stitched later.
2663	 */
2664	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2665
2666		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2667			continue;
2668
2669		stitch_node = get_stitch_node(thread);
2670		if (!stitch_node)
2671			return false;
2672
2673		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2674		       sizeof(struct callchain_cursor_node));
2675
2676		if (callee)
2677			list_add(&stitch_node->node, &lbr_stitch->lists);
2678		else
2679			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2680	}
2681
2682	return true;
2683}
2684
2685static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2686{
2687	if (thread->lbr_stitch)
2688		return true;
2689
2690	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2691	if (!thread->lbr_stitch)
2692		goto err;
2693
2694	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2695	if (!thread->lbr_stitch->prev_lbr_cursor)
2696		goto free_lbr_stitch;
2697
2698	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2699	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2700
2701	return true;
2702
2703free_lbr_stitch:
2704	zfree(&thread->lbr_stitch);
2705err:
2706	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2707	thread->lbr_stitch_enable = false;
2708	return false;
2709}
2710
2711/*
2712 * Resolve an LBR callstack chain sample.
2713 * Return:
2714 * 1 on success, LBR callchain information was resolved
2715 * 0 if no LBR callchain information is available; fall back to fp
2716 * negative error code on other errors.
2717 */
2718static int resolve_lbr_callchain_sample(struct thread *thread,
2719					struct callchain_cursor *cursor,
2720					struct perf_sample *sample,
2721					struct symbol **parent,
2722					struct addr_location *root_al,
2723					int max_stack,
2724					unsigned int max_lbr)
2725{
2726	bool callee = (callchain_param.order == ORDER_CALLEE);
2727	struct ip_callchain *chain = sample->callchain;
2728	int chain_nr = min(max_stack, (int)chain->nr), i;
2729	struct lbr_stitch *lbr_stitch;
2730	bool stitched_lbr = false;
2731	u64 branch_from = 0;
2732	int err;
2733
2734	for (i = 0; i < chain_nr; i++) {
2735		if (chain->ips[i] == PERF_CONTEXT_USER)
2736			break;
2737	}
2738
2739	/* LBR only affects the user callchain */
2740	if (i == chain_nr)
2741		return 0;
2742
2743	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2744	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2745		lbr_stitch = thread->lbr_stitch;
2746
2747		stitched_lbr = has_stitched_lbr(thread, sample,
2748						&lbr_stitch->prev_sample,
2749						max_lbr, callee);
2750
2751		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2752			list_replace_init(&lbr_stitch->lists,
2753					  &lbr_stitch->free_lists);
2754		}
2755		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2756	}
2757
2758	if (callee) {
2759		/* Add kernel ip */
2760		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2761						  parent, root_al, branch_from,
2762						  true, i);
2763		if (err)
2764			goto error;
2765
2766		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2767					       root_al, &branch_from, true);
2768		if (err)
2769			goto error;
2770
2771		if (stitched_lbr) {
2772			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2773			if (err)
2774				goto error;
2775		}
2776
2777	} else {
2778		if (stitched_lbr) {
2779			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2780			if (err)
2781				goto error;
2782		}
2783		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2784					       root_al, &branch_from, false);
2785		if (err)
2786			goto error;
2787
2788		/* Add kernel ip */
2789		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2790						  parent, root_al, branch_from,
2791						  false, i);
2792		if (err)
2793			goto error;
2794	}
2795	return 1;
2796
2797error:
2798	return (err < 0) ? err : 0;
2799}
2800
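/*
 * Scan backwards from @ent for the most recent PERF_CONTEXT_* marker and
 * feed it to add_callchain_ip(), which updates *cpumode accordingly.  This
 * is needed when the chain is walked in caller order, where a context
 * marker would otherwise only be seen after the entries it applies to.
 */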
2801static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2802			     struct callchain_cursor *cursor,
2803			     struct symbol **parent,
2804			     struct addr_location *root_al,
2805			     u8 *cpumode, int ent)
2806{
2807	int err = 0;
2808
2809	while (--ent >= 0) {
2810		u64 ip = chain->ips[ent];
2811
2812		if (ip >= PERF_CONTEXT_MAX) {
2813			err = add_callchain_ip(thread, cursor, parent,
2814					       root_al, cpumode, ip,
2815					       false, NULL, NULL, 0);
2816			break;
2817		}
2818	}
2819	return err;
2820}
2821
2822static u64 get_leaf_frame_caller(struct perf_sample *sample,
2823		struct thread *thread, int usr_idx)
2824{
2825	if (machine__normalized_is(thread->maps->machine, "arm64"))
2826		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2827	else
2828		return 0;
2829}
2830
2831static int thread__resolve_callchain_sample(struct thread *thread,
2832					    struct callchain_cursor *cursor,
2833					    struct evsel *evsel,
2834					    struct perf_sample *sample,
2835					    struct symbol **parent,
2836					    struct addr_location *root_al,
2837					    int max_stack)
2838{
2839	struct branch_stack *branch = sample->branch_stack;
2840	struct branch_entry *entries = perf_sample__branch_entries(sample);
2841	struct ip_callchain *chain = sample->callchain;
2842	int chain_nr = 0;
2843	u8 cpumode = PERF_RECORD_MISC_USER;
2844	int i, j, err, nr_entries, usr_idx;
2845	int skip_idx = -1;
2846	int first_call = 0;
2847	u64 leaf_frame_caller;
2848
2849	if (chain)
2850		chain_nr = chain->nr;
2851
2852	if (evsel__has_branch_callstack(evsel)) {
2853		struct perf_env *env = evsel__env(evsel);
2854
2855		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2856						   root_al, max_stack,
2857						   !env ? 0 : env->max_branches);
2858		if (err)
2859			return (err < 0) ? err : 0;
2860	}
2861
2862	/*
2863	 * Based on DWARF debug information, some architectures skip
2864	 * a callchain entry saved by the kernel.
2865	 */
2866	skip_idx = arch_skip_callchain_idx(thread, chain);
2867
2868	/*
2869	 * Add branches to call stack for easier browsing. This gives
2870	 * more context for a sample than just the callers.
2871	 *
2872	 * This uses individual histograms of paths compared to the
2873	 * aggregated histograms the normal LBR mode uses.
2874	 *
2875	 * Limitations for now:
2876	 * - No extra filters
2877	 * - No annotations (should annotate somehow)
2878	 */
2879
2880	if (branch && callchain_param.branch_callstack) {
2881		int nr = min(max_stack, (int)branch->nr);
2882		struct branch_entry be[nr];
2883		struct iterations iter[nr];
2884
2885		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2886			pr_warning("corrupted branch chain. skipping...\n");
2887			goto check_calls;
2888		}
2889
2890		for (i = 0; i < nr; i++) {
2891			if (callchain_param.order == ORDER_CALLEE) {
2892				be[i] = entries[i];
2893
2894				if (chain == NULL)
2895					continue;
2896
2897				/*
2898				 * Check for overlap into the callchain.
2899				 * The return address is one off compared to
2900				 * the branch entry. To adjust for this
2901				 * assume the calling instruction is not longer
2902				 * than 8 bytes.
2903				 */
2904				if (i == skip_idx ||
2905				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2906					first_call++;
2907				else if (be[i].from < chain->ips[first_call] &&
2908				    be[i].from >= chain->ips[first_call] - 8)
2909					first_call++;
2910			} else
2911				be[i] = entries[branch->nr - i - 1];
2912		}
2913
2914		memset(iter, 0, sizeof(struct iterations) * nr);
2915		nr = remove_loops(be, nr, iter);
2916
2917		for (i = 0; i < nr; i++) {
2918			err = add_callchain_ip(thread, cursor, parent,
2919					       root_al,
2920					       NULL, be[i].to,
2921					       true, &be[i].flags,
2922					       NULL, be[i].from);
2923
2924			if (!err)
2925				err = add_callchain_ip(thread, cursor, parent, root_al,
2926						       NULL, be[i].from,
2927						       true, &be[i].flags,
2928						       &iter[i], 0);
2929			if (err == -EINVAL)
2930				break;
2931			if (err)
2932				return err;
2933		}
2934
2935		if (chain_nr == 0)
2936			return 0;
2937
2938		chain_nr -= nr;
2939	}
2940
2941check_calls:
2942	if (chain && callchain_param.order != ORDER_CALLEE) {
2943		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2944					&cpumode, chain->nr - first_call);
2945		if (err)
2946			return (err < 0) ? err : 0;
2947	}
2948	for (i = first_call, nr_entries = 0;
2949	     i < chain_nr && nr_entries < max_stack; i++) {
2950		u64 ip;
2951
2952		if (callchain_param.order == ORDER_CALLEE)
2953			j = i;
2954		else
2955			j = chain->nr - i - 1;
2956
2957#ifdef HAVE_SKIP_CALLCHAIN_IDX
2958		if (j == skip_idx)
2959			continue;
2960#endif
2961		ip = chain->ips[j];
2962		if (ip < PERF_CONTEXT_MAX)
2963			++nr_entries;
2964		else if (callchain_param.order != ORDER_CALLEE) {
2965			err = find_prev_cpumode(chain, thread, cursor, parent,
2966						root_al, &cpumode, j);
2967			if (err)
2968				return (err < 0) ? err : 0;
2969			continue;
2970		}
2971
2972		/*
2973		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
2974		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
2975		 * the index will be different in order to add the missing frame
2976		 * at the right place.
2977		 */
2978
2979		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
2980
2981		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
2982
2983			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
2984
2985			/*
2986			 * Check that leaf_frame_caller != ip so that the same
2987			 * value is not added twice.
2988			 */
2989
2990			if (leaf_frame_caller && leaf_frame_caller != ip) {
2991
2992				err = add_callchain_ip(thread, cursor, parent,
2993					       root_al, &cpumode, leaf_frame_caller,
2994					       false, NULL, NULL, 0);
2995				if (err)
2996					return (err < 0) ? err : 0;
2997			}
2998		}
2999
3000		err = add_callchain_ip(thread, cursor, parent,
3001				       root_al, &cpumode, ip,
3002				       false, NULL, NULL, 0);
3003
3004		if (err)
3005			return (err < 0) ? err : 0;
3006	}
3007
3008	return 0;
3009}
3010
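/*
 * Expand @ip into its inlined call frames and append one cursor entry per
 * inline frame.  Returns 0 when at least one inline entry was appended (the
 * caller then skips the plain entry for @ip), non-zero otherwise.
 */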
3011static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
3012{
3013	struct symbol *sym = ms->sym;
3014	struct map *map = ms->map;
3015	struct inline_node *inline_node;
3016	struct inline_list *ilist;
3017	u64 addr;
3018	int ret = 1;
3019
3020	if (!symbol_conf.inline_name || !map || !sym)
3021		return ret;
3022
3023	addr = map__map_ip(map, ip);
3024	addr = map__rip_2objdump(map, addr);
3025
3026	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
3027	if (!inline_node) {
3028		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
3029		if (!inline_node)
3030			return ret;
3031		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
3032	}
3033
3034	list_for_each_entry(ilist, &inline_node->val, list) {
3035		struct map_symbol ilist_ms = {
3036			.maps = ms->maps,
3037			.map = map,
3038			.sym = ilist->symbol,
3039		};
3040		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
3041					      NULL, 0, 0, 0, ilist->srcline);
3042
3043		if (ret != 0)
3044			return ret;
3045	}
3046
3047	return ret;
3048}
3049
3050static int unwind_entry(struct unwind_entry *entry, void *arg)
3051{
3052	struct callchain_cursor *cursor = arg;
3053	const char *srcline = NULL;
3054	u64 addr = entry->ip;
3055
3056	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
3057		return 0;
3058
3059	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
3060		return 0;
3061
3062	/*
3063	 * Convert entry->ip from a virtual address to an offset in
3064	 * its corresponding binary.
3065	 */
3066	if (entry->ms.map)
3067		addr = map__map_ip(entry->ms.map, entry->ip);
3068
3069	srcline = callchain_srcline(&entry->ms, addr);
3070	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
3071				       false, NULL, 0, 0, 0, srcline);
3072}
3073
3074static int thread__resolve_callchain_unwind(struct thread *thread,
3075					    struct callchain_cursor *cursor,
3076					    struct evsel *evsel,
3077					    struct perf_sample *sample,
3078					    int max_stack)
3079{
3080	/* Can we do dwarf post unwind? */
3081	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
3082	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3083		return 0;
3084
3085	/* Bail out if nothing was captured. */
3086	if ((!sample->user_regs.regs) ||
3087	    (!sample->user_stack.size))
3088		return 0;
3089
3090	return unwind__get_entries(unwind_entry, cursor,
3091				   thread, sample, max_stack, false);
3092}
3093
3094int thread__resolve_callchain(struct thread *thread,
3095			      struct callchain_cursor *cursor,
3096			      struct evsel *evsel,
3097			      struct perf_sample *sample,
3098			      struct symbol **parent,
3099			      struct addr_location *root_al,
3100			      int max_stack)
3101{
3102	int ret = 0;
3103
3104	callchain_cursor_reset(cursor);
3105
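	/*
	 * The order matters: for callee order the sample-based entries are
	 * appended first, followed by the DWARF-unwound user frames; for
	 * caller order the unwind results come first.
	 */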
3106	if (callchain_param.order == ORDER_CALLEE) {
3107		ret = thread__resolve_callchain_sample(thread, cursor,
3108						       evsel, sample,
3109						       parent, root_al,
3110						       max_stack);
3111		if (ret)
3112			return ret;
3113		ret = thread__resolve_callchain_unwind(thread, cursor,
3114						       evsel, sample,
3115						       max_stack);
3116	} else {
3117		ret = thread__resolve_callchain_unwind(thread, cursor,
3118						       evsel, sample,
3119						       max_stack);
3120		if (ret)
3121			return ret;
3122		ret = thread__resolve_callchain_sample(thread, cursor,
3123						       evsel, sample,
3124						       parent, root_al,
3125						       max_stack);
3126	}
3127
3128	return ret;
3129}
3130
3131int machine__for_each_thread(struct machine *machine,
3132			     int (*fn)(struct thread *thread, void *p),
3133			     void *priv)
3134{
3135	struct threads *threads;
3136	struct rb_node *nd;
3137	struct thread *thread;
3138	int rc = 0;
3139	int i;
3140
3141	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3142		threads = &machine->threads[i];
3143		for (nd = rb_first_cached(&threads->entries); nd;
3144		     nd = rb_next(nd)) {
3145			thread = rb_entry(nd, struct thread, rb_node);
3146			rc = fn(thread, priv);
3147			if (rc != 0)
3148				return rc;
3149		}
3150
3151		list_for_each_entry(thread, &threads->dead, node) {
3152			rc = fn(thread, priv);
3153			if (rc != 0)
3154				return rc;
3155		}
3156	}
3157	return rc;
3158}
3159
3160int machines__for_each_thread(struct machines *machines,
3161			      int (*fn)(struct thread *thread, void *p),
3162			      void *priv)
3163{
3164	struct rb_node *nd;
3165	int rc = 0;
3166
3167	rc = machine__for_each_thread(&machines->host, fn, priv);
3168	if (rc != 0)
3169		return rc;
3170
3171	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3172		struct machine *machine = rb_entry(nd, struct machine, rb_node);
3173
3174		rc = machine__for_each_thread(machine, fn, priv);
3175		if (rc != 0)
3176			return rc;
3177	}
3178	return rc;
3179}
3180
3181pid_t machine__get_current_tid(struct machine *machine, int cpu)
3182{
3183	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3184		return -1;
3185
3186	return machine->current_tid[cpu];
3187}
3188
3189int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3190			     pid_t tid)
3191{
3192	struct thread *thread;
3193	const pid_t init_val = -1;
3194
3195	if (cpu < 0)
3196		return -EINVAL;
3197
3198	if (realloc_array_as_needed(machine->current_tid,
3199				    machine->current_tid_sz,
3200				    (unsigned int)cpu,
3201				    &init_val))
3202		return -ENOMEM;
3203
3204	machine->current_tid[cpu] = tid;
3205
3206	thread = machine__findnew_thread(machine, pid, tid);
3207	if (!thread)
3208		return -ENOMEM;
3209
3210	thread->cpu = cpu;
3211	thread__put(thread);
3212
3213	return 0;
3214}
3215
3216/*
3217 * Compares the raw arch string. N.B. see instead perf_env__arch() or
3218 * machine__normalized_is() if a normalized arch is needed.
3219 */
3220bool machine__is(struct machine *machine, const char *arch)
3221{
3222	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3223}
3224
3225bool machine__normalized_is(struct machine *machine, const char *arch)
3226{
3227	return machine && !strcmp(perf_env__arch(machine->env), arch);
3228}
3229
3230int machine__nr_cpus_avail(struct machine *machine)
3231{
3232	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3233}
3234
3235int machine__get_kernel_start(struct machine *machine)
3236{
3237	struct map *map = machine__kernel_map(machine);
3238	int err = 0;
3239
3240	/*
3241	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3242	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3243	 * all addresses including kernel addresses are less than 2^32.  In
3244	 * that case (32-bit system), if the kernel mapping is unknown, all
3245	 * addresses will be assumed to be in user space - see
3246	 * machine__kernel_ip().
3247	 */
3248	machine->kernel_start = 1ULL << 63;
3249	if (map) {
3250		err = map__load(map);
3251		/*
3252		 * On x86_64, PTI entry trampolines are less than the
3253		 * start of kernel text, but still above 2^63. So leave
3254		 * kernel_start = 1ULL << 63 for x86_64.
3255		 */
3256		if (!err && !machine__is(machine, "x86_64"))
3257			machine->kernel_start = map->start;
3258	}
3259	return err;
3260}
3261
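/*
 * On machines with a single address space the cpumode recorded in the event
 * header may not match @addr, so re-derive kernel vs. user (or guest kernel
 * vs. guest user) from the address itself.
 */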
3262u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3263{
3264	u8 addr_cpumode = cpumode;
3265	bool kernel_ip;
3266
3267	if (!machine->single_address_space)
3268		goto out;
3269
3270	kernel_ip = machine__kernel_ip(machine, addr);
3271	switch (cpumode) {
3272	case PERF_RECORD_MISC_KERNEL:
3273	case PERF_RECORD_MISC_USER:
3274		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3275					   PERF_RECORD_MISC_USER;
3276		break;
3277	case PERF_RECORD_MISC_GUEST_KERNEL:
3278	case PERF_RECORD_MISC_GUEST_USER:
3279		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3280					   PERF_RECORD_MISC_GUEST_USER;
3281		break;
3282	default:
3283		break;
3284	}
3285out:
3286	return addr_cpumode;
3287}
3288
3289struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3290{
3291	return dsos__findnew_id(&machine->dsos, filename, id);
3292}
3293
3294struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3295{
3296	return machine__findnew_dso_id(machine, filename, NULL);
3297}
3298
3299char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3300{
3301	struct machine *machine = vmachine;
3302	struct map *map;
3303	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3304
3305	if (sym == NULL)
3306		return NULL;
3307
3308	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3309	*addrp = map->unmap_ip(map, sym->start);
3310	return sym->name;
3311}
3312
3313int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3314{
3315	struct dso *pos;
3316	int err = 0;
3317
3318	list_for_each_entry(pos, &machine->dsos.head, node) {
3319		if (fn(pos, machine, priv))
3320			err = -1;
3321	}
3322	return err;
3323}
3324
3325int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3326{
3327	struct maps *maps = machine__kernel_maps(machine);
3328	struct map *map;
3329	int err = 0;
3330
3331	for (map = maps__first(maps); map != NULL; map = map__next(map)) {
3332		err = fn(map, priv);
3333		if (err != 0) {
3334			break;
3335		}
3336	}
3337	return err;
3338}
3339
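/*
 * Check whether @addr falls in the kernel's scheduler or locking text
 * sections.  The section boundaries are resolved from kernel symbols on
 * first use and cached in machine->sched / machine->lock; sched.text_start
 * is set to 1 as a sentinel when the symbols cannot be found.
 */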
3340bool machine__is_lock_function(struct machine *machine, u64 addr)
3341{
3342	if (!machine->sched.text_start) {
3343		struct map *kmap;
3344		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3345
3346		if (!sym) {
3347			/* to avoid retry */
3348			machine->sched.text_start = 1;
3349			return false;
3350		}
3351
3352		machine->sched.text_start = kmap->unmap_ip(kmap, sym->start);
3353
3354		/* should not fail from here */
3355		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3356		machine->sched.text_end = kmap->unmap_ip(kmap, sym->start);
3357
3358		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3359		machine->lock.text_start = kmap->unmap_ip(kmap, sym->start);
3360
3361		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3362		machine->lock.text_end = kmap->unmap_ip(kmap, sym->start);
3363	}
3364
3365	/* failed to get kernel symbols */
3366	if (machine->sched.text_start == 1)
3367		return false;
3368
3369	/* mutex and rwsem functions are in sched text section */
3370	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3371		return true;
3372
3373	/* spinlock functions are in lock text section */
3374	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3375		return true;
3376
3377	return false;
3378}