   1// SPDX-License-Identifier: GPL-2.0
   2#include <dirent.h>
   3#include <errno.h>
   4#include <inttypes.h>
   5#include <regex.h>
   6#include <stdlib.h>
   7#include "callchain.h"
   8#include "debug.h"
   9#include "dso.h"
  10#include "env.h"
  11#include "event.h"
  12#include "evsel.h"
  13#include "hist.h"
  14#include "machine.h"
  15#include "map.h"
  16#include "map_symbol.h"
  17#include "branch.h"
  18#include "mem-events.h"
  19#include "srcline.h"
  20#include "symbol.h"
  21#include "sort.h"
  22#include "strlist.h"
  23#include "target.h"
  24#include "thread.h"
  25#include "util.h"
  26#include "vdso.h"
  27#include <stdbool.h>
  28#include <sys/types.h>
  29#include <sys/stat.h>
  30#include <unistd.h>
  31#include "unwind.h"
  32#include "linux/hash.h"
  33#include "asm/bug.h"
  34#include "bpf-event.h"
  35#include <internal/lib.h> // page_size
  36
  37#include <linux/ctype.h>
  38#include <symbol/kallsyms.h>
  39#include <linux/mman.h>
  40#include <linux/string.h>
  41#include <linux/zalloc.h>
  42
  43static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
  44
  45static void dsos__init(struct dsos *dsos)
  46{
  47	INIT_LIST_HEAD(&dsos->head);
  48	dsos->root = RB_ROOT;
  49	init_rwsem(&dsos->lock);
  50}
  51
  52static void machine__threads_init(struct machine *machine)
  53{
  54	int i;
  55
  56	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
  57		struct threads *threads = &machine->threads[i];
  58		threads->entries = RB_ROOT_CACHED;
  59		init_rwsem(&threads->lock);
  60		threads->nr = 0;
  61		INIT_LIST_HEAD(&threads->dead);
  62		threads->last_match = NULL;
  63	}
  64}
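/*
 * Illustrative sketch, not part of the original file: bucket selection for
 * the threads table initialized above. machine__threads() (in machine.h)
 * reduces the tid modulo THREADS__TABLE_SIZE; the cast handles tid == -1:
 *
 *	struct threads *threads =
 *		&machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
 */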
  65
  66static int machine__set_mmap_name(struct machine *machine)
  67{
  68	if (machine__is_host(machine))
  69		machine->mmap_name = strdup("[kernel.kallsyms]");
  70	else if (machine__is_default_guest(machine))
  71		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
  72	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
  73			  machine->pid) < 0)
  74		machine->mmap_name = NULL;
  75
  76	return machine->mmap_name ? 0 : -ENOMEM;
  77}
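/*
 * For illustration, the resulting mmap names: the host machine gets
 * "[kernel.kallsyms]", the default guest "[guest.kernel.kallsyms]", and a
 * guest with pid 42 "[guest.kernel.kallsyms.42]".
 */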
  78
  79int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
  80{
  81	int err = -ENOMEM;
  82
  83	memset(machine, 0, sizeof(*machine));
  84	map_groups__init(&machine->kmaps, machine);
  85	RB_CLEAR_NODE(&machine->rb_node);
  86	dsos__init(&machine->dsos);
  87
  88	machine__threads_init(machine);
  89
  90	machine->vdso_info = NULL;
  91	machine->env = NULL;
  92
  93	machine->pid = pid;
  94
  95	machine->id_hdr_size = 0;
  96	machine->kptr_restrict_warned = false;
  97	machine->comm_exec = false;
  98	machine->kernel_start = 0;
  99	machine->vmlinux_map = NULL;
 100
 101	machine->root_dir = strdup(root_dir);
 102	if (machine->root_dir == NULL)
 103		return -ENOMEM;
 104
 105	if (machine__set_mmap_name(machine))
 106		goto out;
 107
 108	if (pid != HOST_KERNEL_ID) {
 109		struct thread *thread = machine__findnew_thread(machine, -1,
 110								pid);
 111		char comm[64];
 112
 113		if (thread == NULL)
 114			goto out;
 115
 116		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
 117		thread__set_comm(thread, comm, 0);
 118		thread__put(thread);
 119	}
 120
 121	machine->current_tid = NULL;
 122	err = 0;
 123
 124out:
 125	if (err) {
 126		zfree(&machine->root_dir);
 127		zfree(&machine->mmap_name);
 128	}
  129	return err;
 130}
 131
 132struct machine *machine__new_host(void)
 133{
 134	struct machine *machine = malloc(sizeof(*machine));
 135
 136	if (machine != NULL) {
 137		machine__init(machine, "", HOST_KERNEL_ID);
 138
 139		if (machine__create_kernel_maps(machine) < 0)
 140			goto out_delete;
 141	}
 142
 143	return machine;
 144out_delete:
 145	free(machine);
 146	return NULL;
 147}
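/*
 * Typical lifecycle, sketched for illustration (error handling elided):
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		// ... resolve maps/symbols, process events ...
 *		machine__delete(machine);
 *	}
 */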
 148
 149struct machine *machine__new_kallsyms(void)
 150{
 151	struct machine *machine = machine__new_host();
 152	/*
 153	 * FIXME:
 154	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
 155	 *    ask for not using the kcore parsing code, once this one is fixed
 156	 *    to create a map per module.
 157	 */
 158	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
 159		machine__delete(machine);
 160		machine = NULL;
 161	}
 162
 163	return machine;
 164}
 165
 166static void dsos__purge(struct dsos *dsos)
 167{
 168	struct dso *pos, *n;
 169
 170	down_write(&dsos->lock);
 171
 172	list_for_each_entry_safe(pos, n, &dsos->head, node) {
 173		RB_CLEAR_NODE(&pos->rb_node);
 174		pos->root = NULL;
 175		list_del_init(&pos->node);
 176		dso__put(pos);
 177	}
 178
 179	up_write(&dsos->lock);
 180}
 181
 182static void dsos__exit(struct dsos *dsos)
 183{
 184	dsos__purge(dsos);
 185	exit_rwsem(&dsos->lock);
 186}
 187
 188void machine__delete_threads(struct machine *machine)
 189{
 190	struct rb_node *nd;
 191	int i;
 192
 193	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 194		struct threads *threads = &machine->threads[i];
 195		down_write(&threads->lock);
 196		nd = rb_first_cached(&threads->entries);
 197		while (nd) {
 198			struct thread *t = rb_entry(nd, struct thread, rb_node);
 199
 200			nd = rb_next(nd);
 201			__machine__remove_thread(machine, t, false);
 202		}
 203		up_write(&threads->lock);
 204	}
 205}
 206
 207void machine__exit(struct machine *machine)
 208{
 209	int i;
 210
 211	if (machine == NULL)
 212		return;
 213
 214	machine__destroy_kernel_maps(machine);
 215	map_groups__exit(&machine->kmaps);
 216	dsos__exit(&machine->dsos);
 217	machine__exit_vdso(machine);
 218	zfree(&machine->root_dir);
 219	zfree(&machine->mmap_name);
 220	zfree(&machine->current_tid);
 221
 222	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 223		struct threads *threads = &machine->threads[i];
 224		struct thread *thread, *n;
  225		/*
  226		 * Forget about the dead: at this point whatever threads were
  227		 * left in the dead lists had better have a reference count taken
  228		 * by whoever is using them; then, when those references drop and
  229		 * the count finally hits zero, thread__put() will see that the
  230		 * thread is not in the dead threads list and will not try to
  231		 * remove it from there, just calling thread__delete() straight away.
  232		 */
 233		list_for_each_entry_safe(thread, n, &threads->dead, node)
 234			list_del_init(&thread->node);
 235
 236		exit_rwsem(&threads->lock);
 237	}
 238}
 239
 240void machine__delete(struct machine *machine)
 241{
 242	if (machine) {
 243		machine__exit(machine);
 244		free(machine);
 245	}
 246}
 247
 248void machines__init(struct machines *machines)
 249{
 250	machine__init(&machines->host, "", HOST_KERNEL_ID);
 251	machines->guests = RB_ROOT_CACHED;
 252}
 253
 254void machines__exit(struct machines *machines)
 255{
 256	machine__exit(&machines->host);
 257	/* XXX exit guest */
 258}
 259
 260struct machine *machines__add(struct machines *machines, pid_t pid,
 261			      const char *root_dir)
 262{
 263	struct rb_node **p = &machines->guests.rb_root.rb_node;
 264	struct rb_node *parent = NULL;
 265	struct machine *pos, *machine = malloc(sizeof(*machine));
 266	bool leftmost = true;
 267
 268	if (machine == NULL)
 269		return NULL;
 270
 271	if (machine__init(machine, root_dir, pid) != 0) {
 272		free(machine);
 273		return NULL;
 274	}
 275
 276	while (*p != NULL) {
 277		parent = *p;
 278		pos = rb_entry(parent, struct machine, rb_node);
 279		if (pid < pos->pid)
 280			p = &(*p)->rb_left;
 281		else {
 282			p = &(*p)->rb_right;
 283			leftmost = false;
 284		}
 285	}
 286
 287	rb_link_node(&machine->rb_node, parent, p);
 288	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
 289
 290	return machine;
 291}
 292
 293void machines__set_comm_exec(struct machines *machines, bool comm_exec)
 294{
 295	struct rb_node *nd;
 296
 297	machines->host.comm_exec = comm_exec;
 298
 299	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 300		struct machine *machine = rb_entry(nd, struct machine, rb_node);
 301
 302		machine->comm_exec = comm_exec;
 303	}
 304}
 305
 306struct machine *machines__find(struct machines *machines, pid_t pid)
 307{
 308	struct rb_node **p = &machines->guests.rb_root.rb_node;
 309	struct rb_node *parent = NULL;
 310	struct machine *machine;
 311	struct machine *default_machine = NULL;
 312
 313	if (pid == HOST_KERNEL_ID)
 314		return &machines->host;
 315
 316	while (*p != NULL) {
 317		parent = *p;
 318		machine = rb_entry(parent, struct machine, rb_node);
 319		if (pid < machine->pid)
 320			p = &(*p)->rb_left;
 321		else if (pid > machine->pid)
 322			p = &(*p)->rb_right;
 323		else
 324			return machine;
 325		if (!machine->pid)
 326			default_machine = machine;
 327	}
 328
 329	return default_machine;
 330}
 331
 332struct machine *machines__findnew(struct machines *machines, pid_t pid)
 333{
 334	char path[PATH_MAX];
 335	const char *root_dir = "";
 336	struct machine *machine = machines__find(machines, pid);
 337
 338	if (machine && (machine->pid == pid))
 339		goto out;
 340
 341	if ((pid != HOST_KERNEL_ID) &&
 342	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
 343	    (symbol_conf.guestmount)) {
 344		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
 345		if (access(path, R_OK)) {
 346			static struct strlist *seen;
 347
 348			if (!seen)
 349				seen = strlist__new(NULL, NULL);
 350
 351			if (!strlist__has_entry(seen, path)) {
 352				pr_err("Can't access file %s\n", path);
 353				strlist__add(seen, path);
 354			}
 355			machine = NULL;
 356			goto out;
 357		}
 358		root_dir = path;
 359	}
 360
 361	machine = machines__add(machines, pid, root_dir);
 362out:
 363	return machine;
 364}
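/*
 * For illustration: with --guestmount=/tmp/guests, a guest with pid 1234 is
 * expected under /tmp/guests/1234, and its kallsyms would later be read from
 * /tmp/guests/1234/proc/kallsyms (hence the access() check above).
 */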
 365
 366void machines__process_guests(struct machines *machines,
 367			      machine__process_t process, void *data)
 368{
 369	struct rb_node *nd;
 370
 371	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 372		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 373		process(pos, data);
 374	}
 375}
 376
 377void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 378{
 379	struct rb_node *node;
 380	struct machine *machine;
 381
 382	machines->host.id_hdr_size = id_hdr_size;
 383
 384	for (node = rb_first_cached(&machines->guests); node;
 385	     node = rb_next(node)) {
 386		machine = rb_entry(node, struct machine, rb_node);
 387		machine->id_hdr_size = id_hdr_size;
 388	}
 389
 390	return;
 391}
 392
 393static void machine__update_thread_pid(struct machine *machine,
 394				       struct thread *th, pid_t pid)
 395{
 396	struct thread *leader;
 397
 398	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
 399		return;
 400
 401	th->pid_ = pid;
 402
 403	if (th->pid_ == th->tid)
 404		return;
 405
 406	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
 407	if (!leader)
 408		goto out_err;
 409
 410	if (!leader->mg)
 411		leader->mg = map_groups__new(machine);
 412
 413	if (!leader->mg)
 414		goto out_err;
 415
 416	if (th->mg == leader->mg)
 417		return;
 418
 419	if (th->mg) {
 420		/*
 421		 * Maps are created from MMAP events which provide the pid and
 422		 * tid.  Consequently there never should be any maps on a thread
 423		 * with an unknown pid.  Just print an error if there are.
 424		 */
 425		if (!map_groups__empty(th->mg))
 426			pr_err("Discarding thread maps for %d:%d\n",
 427			       th->pid_, th->tid);
 428		map_groups__put(th->mg);
 429	}
 430
 431	th->mg = map_groups__get(leader->mg);
 432out_put:
 433	thread__put(leader);
 434	return;
 435out_err:
 436	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
 437	goto out_put;
 438}
 439
 440/*
 441 * Front-end cache - TID lookups come in blocks,
  442 * so most of the time we don't have to look up
 443 * the full rbtree:
 444 */
 445static struct thread*
 446__threads__get_last_match(struct threads *threads, struct machine *machine,
 447			  int pid, int tid)
 448{
 449	struct thread *th;
 450
 451	th = threads->last_match;
 452	if (th != NULL) {
 453		if (th->tid == tid) {
 454			machine__update_thread_pid(machine, th, pid);
 455			return thread__get(th);
 456		}
 457
 458		threads->last_match = NULL;
 459	}
 460
 461	return NULL;
 462}
 463
 464static struct thread*
 465threads__get_last_match(struct threads *threads, struct machine *machine,
 466			int pid, int tid)
 467{
 468	struct thread *th = NULL;
 469
 470	if (perf_singlethreaded)
 471		th = __threads__get_last_match(threads, machine, pid, tid);
 472
 473	return th;
 474}
 475
 476static void
 477__threads__set_last_match(struct threads *threads, struct thread *th)
 478{
 479	threads->last_match = th;
 480}
 481
 482static void
 483threads__set_last_match(struct threads *threads, struct thread *th)
 484{
 485	if (perf_singlethreaded)
 486		__threads__set_last_match(threads, th);
 487}
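/*
 * Note, for illustration: the last_match cache is only consulted when
 * perf_singlethreaded is set, since a single shared cache slot would race
 * between threads. A repeated lookup then short-circuits like this (sketch,
 * refcounting elided):
 *
 *	th = threads__get_last_match(threads, machine, pid, tid);
 *	if (th)
 *		return th;	// cache hit, rbtree walk skipped
 */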
 488
 489/*
 490 * Caller must eventually drop thread->refcnt returned with a successful
 491 * lookup/new thread inserted.
 492 */
 493static struct thread *____machine__findnew_thread(struct machine *machine,
 494						  struct threads *threads,
 495						  pid_t pid, pid_t tid,
 496						  bool create)
 497{
 498	struct rb_node **p = &threads->entries.rb_root.rb_node;
 499	struct rb_node *parent = NULL;
 500	struct thread *th;
 501	bool leftmost = true;
 502
 503	th = threads__get_last_match(threads, machine, pid, tid);
 504	if (th)
 505		return th;
 506
 507	while (*p != NULL) {
 508		parent = *p;
 509		th = rb_entry(parent, struct thread, rb_node);
 510
 511		if (th->tid == tid) {
 512			threads__set_last_match(threads, th);
 513			machine__update_thread_pid(machine, th, pid);
 514			return thread__get(th);
 515		}
 516
 517		if (tid < th->tid)
 518			p = &(*p)->rb_left;
 519		else {
 520			p = &(*p)->rb_right;
 521			leftmost = false;
 522		}
 523	}
 524
 525	if (!create)
 526		return NULL;
 527
 528	th = thread__new(pid, tid);
 529	if (th != NULL) {
 530		rb_link_node(&th->rb_node, parent, p);
 531		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
 532
 533		/*
 534		 * We have to initialize map_groups separately
 535		 * after rb tree is updated.
 536		 *
 537		 * The reason is that we call machine__findnew_thread
 538		 * within thread__init_map_groups to find the thread
  539		 * leader, and that would corrupt the rb tree.
 540		 */
 541		if (thread__init_map_groups(th, machine)) {
 542			rb_erase_cached(&th->rb_node, &threads->entries);
 543			RB_CLEAR_NODE(&th->rb_node);
 544			thread__put(th);
 545			return NULL;
 546		}
 547		/*
 548		 * It is now in the rbtree, get a ref
 549		 */
 550		thread__get(th);
 551		threads__set_last_match(threads, th);
 552		++threads->nr;
 553	}
 554
 555	return th;
 556}
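/*
 * Reference counting, sketched for illustration: a successful lookup/new
 * returns a thread with a reference held for the caller (on top of the one
 * owned by the rbtree), so callers pair it with thread__put():
 *
 *	struct thread *thread = machine__findnew_thread(machine, pid, tid);
 *
 *	if (thread != NULL) {
 *		// ... use thread ...
 *		thread__put(thread);
 *	}
 */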
 557
 558struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
 559{
 560	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
 561}
 562
 563struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
 564				       pid_t tid)
 565{
 566	struct threads *threads = machine__threads(machine, tid);
 567	struct thread *th;
 568
 569	down_write(&threads->lock);
 570	th = __machine__findnew_thread(machine, pid, tid);
 571	up_write(&threads->lock);
 572	return th;
 573}
 574
 575struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 576				    pid_t tid)
 577{
 578	struct threads *threads = machine__threads(machine, tid);
 579	struct thread *th;
 580
 581	down_read(&threads->lock);
  582	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
 583	up_read(&threads->lock);
 584	return th;
 585}
 586
 587struct comm *machine__thread_exec_comm(struct machine *machine,
 588				       struct thread *thread)
 589{
 590	if (machine->comm_exec)
 591		return thread__exec_comm(thread);
 592	else
 593		return thread__comm(thread);
 594}
 595
 596int machine__process_comm_event(struct machine *machine, union perf_event *event,
 597				struct perf_sample *sample)
 598{
 599	struct thread *thread = machine__findnew_thread(machine,
 600							event->comm.pid,
 601							event->comm.tid);
 602	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
 603	int err = 0;
 604
 605	if (exec)
 606		machine->comm_exec = true;
 607
 608	if (dump_trace)
 609		perf_event__fprintf_comm(event, stdout);
 610
 611	if (thread == NULL ||
 612	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
 613		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 614		err = -1;
 615	}
 616
 617	thread__put(thread);
 618
 619	return err;
 620}
 621
 622int machine__process_namespaces_event(struct machine *machine __maybe_unused,
 623				      union perf_event *event,
 624				      struct perf_sample *sample __maybe_unused)
 625{
 626	struct thread *thread = machine__findnew_thread(machine,
 627							event->namespaces.pid,
 628							event->namespaces.tid);
 629	int err = 0;
 630
 631	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
 632		  "\nWARNING: kernel seems to support more namespaces than perf"
 633		  " tool.\nTry updating the perf tool..\n\n");
 634
 635	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
 636		  "\nWARNING: perf tool seems to support more namespaces than"
 637		  " the kernel.\nTry updating the kernel..\n\n");
 638
 639	if (dump_trace)
 640		perf_event__fprintf_namespaces(event, stdout);
 641
 642	if (thread == NULL ||
 643	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
 644		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
 645		err = -1;
 646	}
 647
 648	thread__put(thread);
 649
 650	return err;
 651}
 652
 653int machine__process_lost_event(struct machine *machine __maybe_unused,
 654				union perf_event *event, struct perf_sample *sample __maybe_unused)
 655{
 656	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
 657		    event->lost.id, event->lost.lost);
 658	return 0;
 659}
 660
 661int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
 662					union perf_event *event, struct perf_sample *sample)
 663{
 664	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
 665		    sample->id, event->lost_samples.lost);
 666	return 0;
 667}
 668
 669static struct dso *machine__findnew_module_dso(struct machine *machine,
 670					       struct kmod_path *m,
 671					       const char *filename)
 672{
 673	struct dso *dso;
 674
 675	down_write(&machine->dsos.lock);
 676
 677	dso = __dsos__find(&machine->dsos, m->name, true);
 678	if (!dso) {
 679		dso = __dsos__addnew(&machine->dsos, m->name);
 680		if (dso == NULL)
 681			goto out_unlock;
 682
 683		dso__set_module_info(dso, m, machine);
 684		dso__set_long_name(dso, strdup(filename), true);
 685	}
 686
 687	dso__get(dso);
 688out_unlock:
 689	up_write(&machine->dsos.lock);
 690	return dso;
 691}
 692
 693int machine__process_aux_event(struct machine *machine __maybe_unused,
 694			       union perf_event *event)
 695{
 696	if (dump_trace)
 697		perf_event__fprintf_aux(event, stdout);
 698	return 0;
 699}
 700
 701int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
 702					union perf_event *event)
 703{
 704	if (dump_trace)
 705		perf_event__fprintf_itrace_start(event, stdout);
 706	return 0;
 707}
 708
 709int machine__process_switch_event(struct machine *machine __maybe_unused,
 710				  union perf_event *event)
 711{
 712	if (dump_trace)
 713		perf_event__fprintf_switch(event, stdout);
 714	return 0;
 715}
 716
 717static int machine__process_ksymbol_register(struct machine *machine,
 718					     union perf_event *event,
 719					     struct perf_sample *sample __maybe_unused)
 720{
 721	struct symbol *sym;
 722	struct map *map;
 723
 724	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
 725	if (!map) {
 726		map = dso__new_map(event->ksymbol.name);
 727		if (!map)
 728			return -ENOMEM;
 729
 730		map->start = event->ksymbol.addr;
 731		map->end = map->start + event->ksymbol.len;
 732		map_groups__insert(&machine->kmaps, map);
 733	}
 734
 735	sym = symbol__new(map->map_ip(map, map->start),
 736			  event->ksymbol.len,
 737			  0, 0, event->ksymbol.name);
 738	if (!sym)
 739		return -ENOMEM;
 740	dso__insert_symbol(map->dso, sym);
 741	return 0;
 742}
 743
 744static int machine__process_ksymbol_unregister(struct machine *machine,
 745					       union perf_event *event,
 746					       struct perf_sample *sample __maybe_unused)
 747{
 748	struct map *map;
 749
 750	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
 751	if (map)
 752		map_groups__remove(&machine->kmaps, map);
 753
 754	return 0;
 755}
 756
 757int machine__process_ksymbol(struct machine *machine __maybe_unused,
 758			     union perf_event *event,
 759			     struct perf_sample *sample)
 760{
 761	if (dump_trace)
 762		perf_event__fprintf_ksymbol(event, stdout);
 763
 764	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
 765		return machine__process_ksymbol_unregister(machine, event,
 766							   sample);
 767	return machine__process_ksymbol_register(machine, event, sample);
 768}
 769
 770static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
 771{
 772	const char *dup_filename;
 773
 774	if (!filename || !dso || !dso->long_name)
 775		return;
 776	if (dso->long_name[0] != '[')
 777		return;
 778	if (!strchr(filename, '/'))
 779		return;
 780
 781	dup_filename = strdup(filename);
 782	if (!dup_filename)
 783		return;
 784
 785	dso__set_long_name(dso, dup_filename, true);
 786}
 787
 788struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 789					const char *filename)
 790{
 791	struct map *map = NULL;
 792	struct dso *dso = NULL;
 793	struct kmod_path m;
 794
 795	if (kmod_path__parse_name(&m, filename))
 796		return NULL;
 797
 798	map = map_groups__find_by_name(&machine->kmaps, m.name);
 799	if (map) {
 800		/*
 801		 * If the map's dso is an offline module, give dso__load()
 802		 * a chance to find the file path of that module by fixing
 803		 * long_name.
 804		 */
 805		dso__adjust_kmod_long_name(map->dso, filename);
 806		goto out;
 807	}
 808
 809	dso = machine__findnew_module_dso(machine, &m, filename);
 810	if (dso == NULL)
 811		goto out;
 812
 813	map = map__new2(start, dso);
 814	if (map == NULL)
 815		goto out;
 816
 817	map_groups__insert(&machine->kmaps, map);
 818
  819	/* Put the map here because map_groups__insert already got it */
 820	map__put(map);
 821out:
  822	/* put the dso here, corresponding to machine__findnew_module_dso */
 823	dso__put(dso);
 824	zfree(&m.name);
 825	return map;
 826}
 827
 828size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 829{
 830	struct rb_node *nd;
 831	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
 832
 833	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 834		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 835		ret += __dsos__fprintf(&pos->dsos.head, fp);
 836	}
 837
 838	return ret;
 839}
 840
 841size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
 842				     bool (skip)(struct dso *dso, int parm), int parm)
 843{
 844	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
 845}
 846
 847size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
 848				     bool (skip)(struct dso *dso, int parm), int parm)
 849{
 850	struct rb_node *nd;
 851	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
 852
 853	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 854		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 855		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
 856	}
 857	return ret;
 858}
 859
 860size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
 861{
 862	int i;
 863	size_t printed = 0;
 864	struct dso *kdso = machine__kernel_map(machine)->dso;
 865
 866	if (kdso->has_build_id) {
 867		char filename[PATH_MAX];
 868		if (dso__build_id_filename(kdso, filename, sizeof(filename),
 869					   false))
 870			printed += fprintf(fp, "[0] %s\n", filename);
 871	}
 872
 873	for (i = 0; i < vmlinux_path__nr_entries; ++i)
 874		printed += fprintf(fp, "[%d] %s\n",
 875				   i + kdso->has_build_id, vmlinux_path[i]);
 876
 877	return printed;
 878}
 879
 880size_t machine__fprintf(struct machine *machine, FILE *fp)
 881{
 882	struct rb_node *nd;
 883	size_t ret;
 884	int i;
 885
 886	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 887		struct threads *threads = &machine->threads[i];
 888
 889		down_read(&threads->lock);
 890
 891		ret = fprintf(fp, "Threads: %u\n", threads->nr);
 892
 893		for (nd = rb_first_cached(&threads->entries); nd;
 894		     nd = rb_next(nd)) {
 895			struct thread *pos = rb_entry(nd, struct thread, rb_node);
 896
 897			ret += thread__fprintf(pos, fp);
 898		}
 899
 900		up_read(&threads->lock);
 901	}
 902	return ret;
 903}
 904
 905static struct dso *machine__get_kernel(struct machine *machine)
 906{
 907	const char *vmlinux_name = machine->mmap_name;
 908	struct dso *kernel;
 909
 910	if (machine__is_host(machine)) {
 911		if (symbol_conf.vmlinux_name)
 912			vmlinux_name = symbol_conf.vmlinux_name;
 913
 914		kernel = machine__findnew_kernel(machine, vmlinux_name,
 915						 "[kernel]", DSO_TYPE_KERNEL);
 916	} else {
 917		if (symbol_conf.default_guest_vmlinux_name)
 918			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
 919
 920		kernel = machine__findnew_kernel(machine, vmlinux_name,
 921						 "[guest.kernel]",
 922						 DSO_TYPE_GUEST_KERNEL);
 923	}
 924
 925	if (kernel != NULL && (!kernel->has_build_id))
 926		dso__read_running_kernel_build_id(kernel, machine);
 927
 928	return kernel;
 929}
 930
 931struct process_args {
 932	u64 start;
 933};
 934
 935void machine__get_kallsyms_filename(struct machine *machine, char *buf,
 936				    size_t bufsz)
 937{
 938	if (machine__is_default_guest(machine))
 939		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
 940	else
 941		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 942}
 943
 944const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
 945
  946/* Figure out the start address of the kernel map from /proc/kallsyms.
 947 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 948 * symbol_name if it's not that important.
 949 */
 950static int machine__get_running_kernel_start(struct machine *machine,
 951					     const char **symbol_name,
 952					     u64 *start, u64 *end)
 953{
 954	char filename[PATH_MAX];
 955	int i, err = -1;
 956	const char *name;
 957	u64 addr = 0;
 958
 959	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 960
 961	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 962		return 0;
 963
 964	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
 965		err = kallsyms__get_function_start(filename, name, &addr);
 966		if (!err)
 967			break;
 968	}
 969
 970	if (err)
 971		return -1;
 972
 973	if (symbol_name)
 974		*symbol_name = name;
 975
 976	*start = addr;
 977
 978	err = kallsyms__get_function_start(filename, "_etext", &addr);
 979	if (!err)
 980		*end = addr;
 981
 982	return 0;
 983}
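/*
 * For illustration, the /proc/kallsyms lines searched for above look like
 * (addresses read as zero when kptr_restrict is in effect):
 *
 *	ffffffff81000000 T _text
 *	ffffffff81e02000 T _etext
 */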
 984
 985int machine__create_extra_kernel_map(struct machine *machine,
 986				     struct dso *kernel,
 987				     struct extra_kernel_map *xm)
 988{
 989	struct kmap *kmap;
 990	struct map *map;
 991
 992	map = map__new2(xm->start, kernel);
 993	if (!map)
 994		return -1;
 995
 996	map->end   = xm->end;
 997	map->pgoff = xm->pgoff;
 998
 999	kmap = map__kmap(map);
1000
1001	kmap->kmaps = &machine->kmaps;
1002	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1003
1004	map_groups__insert(&machine->kmaps, map);
1005
1006	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1007		  kmap->name, map->start, map->end);
1008
1009	map__put(map);
1010
1011	return 0;
1012}
1013
1014static u64 find_entry_trampoline(struct dso *dso)
1015{
1016	/* Duplicates are removed so lookup all aliases */
1017	const char *syms[] = {
1018		"_entry_trampoline",
1019		"__entry_trampoline_start",
1020		"entry_SYSCALL_64_trampoline",
1021	};
1022	struct symbol *sym = dso__first_symbol(dso);
1023	unsigned int i;
1024
1025	for (; sym; sym = dso__next_symbol(sym)) {
1026		if (sym->binding != STB_GLOBAL)
1027			continue;
1028		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1029			if (!strcmp(sym->name, syms[i]))
1030				return sym->start;
1031		}
1032	}
1033
1034	return 0;
1035}
1036
1037/*
1038 * These values can be used for kernels that do not have symbols for the entry
1039 * trampolines in kallsyms.
1040 */
1041#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1042#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1043#define X86_64_ENTRY_TRAMPOLINE		0x6000
1044
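/*
 * Worked example for the per-CPU trampoline address computed below:
 * for cpu 1, va = 0xfffffe0000000000 + 1 * 0x2c000 + 0x6000
 *               = 0xfffffe0000032000.
 */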
1045/* Map x86_64 PTI entry trampolines */
1046int machine__map_x86_64_entry_trampolines(struct machine *machine,
1047					  struct dso *kernel)
1048{
1049	struct map_groups *kmaps = &machine->kmaps;
1050	struct maps *maps = &kmaps->maps;
1051	int nr_cpus_avail, cpu;
1052	bool found = false;
1053	struct map *map;
1054	u64 pgoff;
1055
1056	/*
1057	 * In the vmlinux case, pgoff is a virtual address which must now be
1058	 * mapped to a vmlinux offset.
1059	 */
1060	for (map = maps__first(maps); map; map = map__next(map)) {
1061		struct kmap *kmap = __map__kmap(map);
1062		struct map *dest_map;
1063
1064		if (!kmap || !is_entry_trampoline(kmap->name))
1065			continue;
1066
1067		dest_map = map_groups__find(kmaps, map->pgoff);
1068		if (dest_map != map)
1069			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1070		found = true;
1071	}
1072	if (found || machine->trampolines_mapped)
1073		return 0;
1074
1075	pgoff = find_entry_trampoline(kernel);
1076	if (!pgoff)
1077		return 0;
1078
1079	nr_cpus_avail = machine__nr_cpus_avail(machine);
1080
1081	/* Add a 1 page map for each CPU's entry trampoline */
1082	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1083		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1084			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1085			 X86_64_ENTRY_TRAMPOLINE;
1086		struct extra_kernel_map xm = {
1087			.start = va,
1088			.end   = va + page_size,
1089			.pgoff = pgoff,
1090		};
1091
1092		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1093
1094		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1095			return -1;
1096	}
1097
1098	machine->trampolines_mapped = nr_cpus_avail;
1099
1100	return 0;
1101}
1102
1103int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1104					     struct dso *kernel __maybe_unused)
1105{
1106	return 0;
1107}
1108
1109static int
1110__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1111{
1112	struct kmap *kmap;
1113	struct map *map;
1114
 1115	/* In case of renewing the kernel map, destroy the previous one */
1116	machine__destroy_kernel_maps(machine);
1117
1118	machine->vmlinux_map = map__new2(0, kernel);
1119	if (machine->vmlinux_map == NULL)
1120		return -1;
1121
1122	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
1123	map = machine__kernel_map(machine);
1124	kmap = map__kmap(map);
1125	if (!kmap)
1126		return -1;
1127
1128	kmap->kmaps = &machine->kmaps;
1129	map_groups__insert(&machine->kmaps, map);
1130
1131	return 0;
1132}
1133
1134void machine__destroy_kernel_maps(struct machine *machine)
1135{
1136	struct kmap *kmap;
1137	struct map *map = machine__kernel_map(machine);
1138
1139	if (map == NULL)
1140		return;
1141
1142	kmap = map__kmap(map);
1143	map_groups__remove(&machine->kmaps, map);
1144	if (kmap && kmap->ref_reloc_sym) {
1145		zfree((char **)&kmap->ref_reloc_sym->name);
1146		zfree(&kmap->ref_reloc_sym);
1147	}
1148
1149	map__zput(machine->vmlinux_map);
1150}
1151
1152int machines__create_guest_kernel_maps(struct machines *machines)
1153{
1154	int ret = 0;
1155	struct dirent **namelist = NULL;
1156	int i, items = 0;
1157	char path[PATH_MAX];
1158	pid_t pid;
1159	char *endp;
1160
1161	if (symbol_conf.default_guest_vmlinux_name ||
1162	    symbol_conf.default_guest_modules ||
1163	    symbol_conf.default_guest_kallsyms) {
1164		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1165	}
1166
1167	if (symbol_conf.guestmount) {
1168		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1169		if (items <= 0)
1170			return -ENOENT;
1171		for (i = 0; i < items; i++) {
1172			if (!isdigit(namelist[i]->d_name[0])) {
1173				/* Filter out . and .. */
1174				continue;
1175			}
1176			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1177			if ((*endp != '\0') ||
1178			    (endp == namelist[i]->d_name) ||
1179			    (errno == ERANGE)) {
1180				pr_debug("invalid directory (%s). Skipping.\n",
1181					 namelist[i]->d_name);
1182				continue;
1183			}
1184			sprintf(path, "%s/%s/proc/kallsyms",
1185				symbol_conf.guestmount,
1186				namelist[i]->d_name);
1187			ret = access(path, R_OK);
1188			if (ret) {
1189				pr_debug("Can't access file %s\n", path);
1190				goto failure;
1191			}
1192			machines__create_kernel_maps(machines, pid);
1193		}
1194failure:
1195		free(namelist);
1196	}
1197
1198	return ret;
1199}
1200
1201void machines__destroy_kernel_maps(struct machines *machines)
1202{
1203	struct rb_node *next = rb_first_cached(&machines->guests);
1204
1205	machine__destroy_kernel_maps(&machines->host);
1206
1207	while (next) {
1208		struct machine *pos = rb_entry(next, struct machine, rb_node);
1209
1210		next = rb_next(&pos->rb_node);
1211		rb_erase_cached(&pos->rb_node, &machines->guests);
1212		machine__delete(pos);
1213	}
1214}
1215
1216int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1217{
1218	struct machine *machine = machines__findnew(machines, pid);
1219
1220	if (machine == NULL)
1221		return -1;
1222
1223	return machine__create_kernel_maps(machine);
1224}
1225
1226int machine__load_kallsyms(struct machine *machine, const char *filename)
1227{
1228	struct map *map = machine__kernel_map(machine);
1229	int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1230
1231	if (ret > 0) {
1232		dso__set_loaded(map->dso);
1233		/*
 1234		 * Since /proc/kallsyms will have multiple sections for the
1235		 * kernel, with modules between them, fixup the end of all
1236		 * sections.
1237		 */
1238		map_groups__fixup_end(&machine->kmaps);
1239	}
1240
1241	return ret;
1242}
1243
1244int machine__load_vmlinux_path(struct machine *machine)
1245{
1246	struct map *map = machine__kernel_map(machine);
1247	int ret = dso__load_vmlinux_path(map->dso, map);
1248
1249	if (ret > 0)
1250		dso__set_loaded(map->dso);
1251
1252	return ret;
1253}
1254
1255static char *get_kernel_version(const char *root_dir)
1256{
1257	char version[PATH_MAX];
1258	FILE *file;
1259	char *name, *tmp;
1260	const char *prefix = "Linux version ";
1261
1262	sprintf(version, "%s/proc/version", root_dir);
1263	file = fopen(version, "r");
1264	if (!file)
1265		return NULL;
1266
1267	tmp = fgets(version, sizeof(version), file);
1268	fclose(file);
1269	if (!tmp)
1270		return NULL;
1271
1272	name = strstr(version, prefix);
1273	if (!name)
1274		return NULL;
1275	name += strlen(prefix);
1276	tmp = strchr(name, ' ');
1277	if (tmp)
1278		*tmp = '\0';
1279
1280	return strdup(name);
1281}
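/*
 * For illustration: given a /proc/version line such as
 * "Linux version 5.4.0-rc1 (user@host) (gcc ...) #1 SMP ...", this returns
 * the strdup'ed string "5.4.0-rc1", i.e. the token between the
 * "Linux version " prefix and the next space.
 */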
1282
1283static bool is_kmod_dso(struct dso *dso)
1284{
1285	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1286	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1287}
1288
1289static int map_groups__set_module_path(struct map_groups *mg, const char *path,
1290				       struct kmod_path *m)
1291{
1292	char *long_name;
1293	struct map *map = map_groups__find_by_name(mg, m->name);
1294
1295	if (map == NULL)
1296		return 0;
1297
1298	long_name = strdup(path);
1299	if (long_name == NULL)
1300		return -ENOMEM;
1301
1302	dso__set_long_name(map->dso, long_name, true);
1303	dso__kernel_module_get_build_id(map->dso, "");
1304
1305	/*
1306	 * Full name could reveal us kmod compression, so
1307	 * we need to update the symtab_type if needed.
1308	 */
1309	if (m->comp && is_kmod_dso(map->dso)) {
1310		map->dso->symtab_type++;
1311		map->dso->comp = m->comp;
1312	}
1313
1314	return 0;
1315}
1316
1317static int map_groups__set_modules_path_dir(struct map_groups *mg,
1318				const char *dir_name, int depth)
1319{
1320	struct dirent *dent;
1321	DIR *dir = opendir(dir_name);
1322	int ret = 0;
1323
1324	if (!dir) {
1325		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1326		return -1;
1327	}
1328
1329	while ((dent = readdir(dir)) != NULL) {
1330		char path[PATH_MAX];
1331		struct stat st;
1332
 1333		/* sshfs might return bad dent->d_type, so we have to stat */
1334		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1335		if (stat(path, &st))
1336			continue;
1337
1338		if (S_ISDIR(st.st_mode)) {
1339			if (!strcmp(dent->d_name, ".") ||
1340			    !strcmp(dent->d_name, ".."))
1341				continue;
1342
1343			/* Do not follow top-level source and build symlinks */
1344			if (depth == 0) {
1345				if (!strcmp(dent->d_name, "source") ||
1346				    !strcmp(dent->d_name, "build"))
1347					continue;
1348			}
1349
1350			ret = map_groups__set_modules_path_dir(mg, path,
1351							       depth + 1);
1352			if (ret < 0)
1353				goto out;
1354		} else {
1355			struct kmod_path m;
1356
1357			ret = kmod_path__parse_name(&m, dent->d_name);
1358			if (ret)
1359				goto out;
1360
1361			if (m.kmod)
1362				ret = map_groups__set_module_path(mg, path, &m);
1363
1364			zfree(&m.name);
1365
1366			if (ret)
1367				goto out;
1368		}
1369	}
1370
1371out:
1372	closedir(dir);
1373	return ret;
1374}
1375
1376static int machine__set_modules_path(struct machine *machine)
1377{
1378	char *version;
1379	char modules_path[PATH_MAX];
1380
1381	version = get_kernel_version(machine->root_dir);
1382	if (!version)
1383		return -1;
1384
1385	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1386		 machine->root_dir, version);
1387	free(version);
1388
1389	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 1390	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

1391int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1392				u64 *size __maybe_unused,
1393				const char *name __maybe_unused)
1394{
1395	return 0;
1396}
1397
1398static int machine__create_module(void *arg, const char *name, u64 start,
1399				  u64 size)
1400{
1401	struct machine *machine = arg;
1402	struct map *map;
1403
1404	if (arch__fix_module_text_start(&start, &size, name) < 0)
1405		return -1;
1406
1407	map = machine__findnew_module_map(machine, start, name);
1408	if (map == NULL)
1409		return -1;
1410	map->end = start + size;
1411
1412	dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1413
1414	return 0;
1415}
1416
1417static int machine__create_modules(struct machine *machine)
1418{
1419	const char *modules;
1420	char path[PATH_MAX];
1421
1422	if (machine__is_default_guest(machine)) {
1423		modules = symbol_conf.default_guest_modules;
1424	} else {
1425		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1426		modules = path;
1427	}
1428
1429	if (symbol__restricted_filename(modules, "/proc/modules"))
1430		return -1;
1431
1432	if (modules__parse(modules, machine, machine__create_module))
1433		return -1;
1434
1435	if (!machine__set_modules_path(machine))
1436		return 0;
1437
1438	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1439
1440	return 0;
1441}
1442
1443static void machine__set_kernel_mmap(struct machine *machine,
1444				     u64 start, u64 end)
1445{
1446	machine->vmlinux_map->start = start;
1447	machine->vmlinux_map->end   = end;
1448	/*
 1449	 * Be a bit paranoid here: some perf.data files come with
 1450	 * a zero-sized synthesized MMAP event for the kernel.
1451	 */
1452	if (start == 0 && end == 0)
1453		machine->vmlinux_map->end = ~0ULL;
1454}
1455
1456static void machine__update_kernel_mmap(struct machine *machine,
1457				     u64 start, u64 end)
1458{
1459	struct map *map = machine__kernel_map(machine);
1460
1461	map__get(map);
1462	map_groups__remove(&machine->kmaps, map);
1463
1464	machine__set_kernel_mmap(machine, start, end);
1465
1466	map_groups__insert(&machine->kmaps, map);
1467	map__put(map);
1468}
1469
1470int machine__create_kernel_maps(struct machine *machine)
1471{
1472	struct dso *kernel = machine__get_kernel(machine);
1473	const char *name = NULL;
1474	struct map *map;
1475	u64 start = 0, end = ~0ULL;
1476	int ret;
1477
1478	if (kernel == NULL)
1479		return -1;
1480
1481	ret = __machine__create_kernel_maps(machine, kernel);
1482	if (ret < 0)
1483		goto out_put;
1484
1485	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1486		if (machine__is_host(machine))
1487			pr_debug("Problems creating module maps, "
1488				 "continuing anyway...\n");
1489		else
1490			pr_debug("Problems creating module maps for guest %d, "
1491				 "continuing anyway...\n", machine->pid);
1492	}
1493
1494	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1495		if (name &&
1496		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1497			machine__destroy_kernel_maps(machine);
1498			ret = -1;
1499			goto out_put;
1500		}
1501
1502		/*
 1503		 * We have a real start address now, so re-order the kmaps;
 1504		 * assume the kernel map is the last one in the kmaps.
1505		 */
1506		machine__update_kernel_mmap(machine, start, end);
1507	}
1508
1509	if (machine__create_extra_kernel_maps(machine, kernel))
1510		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1511
1512	if (end == ~0ULL) {
1513		/* update end address of the kernel map using adjacent module address */
1514		map = map__next(machine__kernel_map(machine));
1515		if (map)
1516			machine__set_kernel_mmap(machine, start, map->start);
1517	}
1518
1519out_put:
1520	dso__put(kernel);
1521	return ret;
1522}
1523
1524static bool machine__uses_kcore(struct machine *machine)
1525{
1526	struct dso *dso;
1527
1528	list_for_each_entry(dso, &machine->dsos.head, node) {
1529		if (dso__is_kcore(dso))
1530			return true;
1531	}
1532
1533	return false;
1534}
1535
1536static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1537					     union perf_event *event)
1538{
1539	return machine__is(machine, "x86_64") &&
1540	       is_entry_trampoline(event->mmap.filename);
1541}
1542
1543static int machine__process_extra_kernel_map(struct machine *machine,
1544					     union perf_event *event)
1545{
1546	struct map *kernel_map = machine__kernel_map(machine);
1547	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
1548	struct extra_kernel_map xm = {
1549		.start = event->mmap.start,
1550		.end   = event->mmap.start + event->mmap.len,
1551		.pgoff = event->mmap.pgoff,
1552	};
1553
1554	if (kernel == NULL)
1555		return -1;
1556
1557	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1558
1559	return machine__create_extra_kernel_map(machine, kernel, &xm);
1560}
1561
1562static int machine__process_kernel_mmap_event(struct machine *machine,
1563					      union perf_event *event)
1564{
1565	struct map *map;
1566	enum dso_kernel_type kernel_type;
1567	bool is_kernel_mmap;
1568
1569	/* If we have maps from kcore then we do not need or want any others */
1570	if (machine__uses_kcore(machine))
1571		return 0;
1572
1573	if (machine__is_host(machine))
1574		kernel_type = DSO_TYPE_KERNEL;
1575	else
1576		kernel_type = DSO_TYPE_GUEST_KERNEL;
1577
1578	is_kernel_mmap = memcmp(event->mmap.filename,
1579				machine->mmap_name,
1580				strlen(machine->mmap_name) - 1) == 0;
1581	if (event->mmap.filename[0] == '/' ||
1582	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1583		map = machine__findnew_module_map(machine, event->mmap.start,
1584						  event->mmap.filename);
1585		if (map == NULL)
1586			goto out_problem;
1587
1588		map->end = map->start + event->mmap.len;
1589	} else if (is_kernel_mmap) {
1590		const char *symbol_name = (event->mmap.filename +
1591				strlen(machine->mmap_name));
1592		/*
1593		 * Should be there already, from the build-id table in
1594		 * the header.
1595		 */
1596		struct dso *kernel = NULL;
1597		struct dso *dso;
1598
1599		down_read(&machine->dsos.lock);
1600
1601		list_for_each_entry(dso, &machine->dsos.head, node) {
1602
 1603			/*
 1604			 * The cpumode passed to is_kernel_module() is not the
 1605			 * cpumode of *this* event. If we insisted on passing
 1606			 * the correct cpumode to is_kernel_module(), we would
 1607			 * have to record the cpumode when adding this dso to
 1608			 * the linked list.
 1609			 *
 1610			 * However, we don't really need to pass the correct
 1611			 * cpumode: we know it must be kernel mode (if not, we
 1612			 * should not have linked the dso onto the kernel dsos
 1613			 * list).
 1614			 *
 1615			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
 1616			 * is_kernel_module() treats it as a kernel cpumode.
 1617			 */
1618
1619			if (!dso->kernel ||
1620			    is_kernel_module(dso->long_name,
1621					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1622				continue;
1623
1624
1625			kernel = dso;
1626			break;
1627		}
1628
1629		up_read(&machine->dsos.lock);
1630
1631		if (kernel == NULL)
1632			kernel = machine__findnew_dso(machine, machine->mmap_name);
1633		if (kernel == NULL)
1634			goto out_problem;
1635
1636		kernel->kernel = kernel_type;
1637		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1638			dso__put(kernel);
1639			goto out_problem;
1640		}
1641
1642		if (strstr(kernel->long_name, "vmlinux"))
1643			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1644
1645		machine__update_kernel_mmap(machine, event->mmap.start,
1646					 event->mmap.start + event->mmap.len);
1647
1648		/*
1649		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1650		 * symbol. Effectively having zero here means that at record
1651		 * time /proc/sys/kernel/kptr_restrict was non zero.
1652		 */
1653		if (event->mmap.pgoff != 0) {
1654			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1655							symbol_name,
1656							event->mmap.pgoff);
1657		}
1658
1659		if (machine__is_default_guest(machine)) {
1660			/*
1661			 * preload dso of guest kernel and modules
1662			 */
1663			dso__load(kernel, machine__kernel_map(machine));
1664		}
1665	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
1666		return machine__process_extra_kernel_map(machine, event);
1667	}
1668	return 0;
1669out_problem:
1670	return -1;
1671}
1672
1673int machine__process_mmap2_event(struct machine *machine,
1674				 union perf_event *event,
1675				 struct perf_sample *sample)
1676{
1677	struct thread *thread;
1678	struct map *map;
1679	int ret = 0;
1680
1681	if (dump_trace)
1682		perf_event__fprintf_mmap2(event, stdout);
1683
1684	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1685	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1686		ret = machine__process_kernel_mmap_event(machine, event);
1687		if (ret < 0)
1688			goto out_problem;
1689		return 0;
1690	}
1691
1692	thread = machine__findnew_thread(machine, event->mmap2.pid,
1693					event->mmap2.tid);
1694	if (thread == NULL)
1695		goto out_problem;
1696
1697	map = map__new(machine, event->mmap2.start,
1698			event->mmap2.len, event->mmap2.pgoff,
1699			event->mmap2.maj,
1700			event->mmap2.min, event->mmap2.ino,
1701			event->mmap2.ino_generation,
1702			event->mmap2.prot,
1703			event->mmap2.flags,
1704			event->mmap2.filename, thread);
1705
1706	if (map == NULL)
1707		goto out_problem_map;
1708
1709	ret = thread__insert_map(thread, map);
1710	if (ret)
1711		goto out_problem_insert;
1712
1713	thread__put(thread);
1714	map__put(map);
1715	return 0;
1716
1717out_problem_insert:
1718	map__put(map);
1719out_problem_map:
1720	thread__put(thread);
1721out_problem:
1722	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1723	return 0;
1724}
1725
1726int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1727				struct perf_sample *sample)
1728{
1729	struct thread *thread;
1730	struct map *map;
1731	u32 prot = 0;
1732	int ret = 0;
1733
1734	if (dump_trace)
1735		perf_event__fprintf_mmap(event, stdout);
1736
1737	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1738	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1739		ret = machine__process_kernel_mmap_event(machine, event);
1740		if (ret < 0)
1741			goto out_problem;
1742		return 0;
1743	}
1744
1745	thread = machine__findnew_thread(machine, event->mmap.pid,
1746					 event->mmap.tid);
1747	if (thread == NULL)
1748		goto out_problem;
1749
1750	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1751		prot = PROT_EXEC;
1752
1753	map = map__new(machine, event->mmap.start,
1754			event->mmap.len, event->mmap.pgoff,
1755			0, 0, 0, 0, prot, 0,
1756			event->mmap.filename,
1757			thread);
1758
1759	if (map == NULL)
1760		goto out_problem_map;
1761
1762	ret = thread__insert_map(thread, map);
1763	if (ret)
1764		goto out_problem_insert;
1765
1766	thread__put(thread);
1767	map__put(map);
1768	return 0;
1769
1770out_problem_insert:
1771	map__put(map);
1772out_problem_map:
1773	thread__put(thread);
1774out_problem:
1775	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1776	return 0;
1777}
1778
1779static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1780{
1781	struct threads *threads = machine__threads(machine, th->tid);
1782
1783	if (threads->last_match == th)
1784		threads__set_last_match(threads, NULL);
1785
1786	if (lock)
1787		down_write(&threads->lock);
1788
1789	BUG_ON(refcount_read(&th->refcnt) == 0);
1790
1791	rb_erase_cached(&th->rb_node, &threads->entries);
1792	RB_CLEAR_NODE(&th->rb_node);
1793	--threads->nr;
1794	/*
1795	 * Move it first to the dead_threads list, then drop the reference,
1796	 * if this is the last reference, then the thread__delete destructor
1797	 * will be called and we will remove it from the dead_threads list.
1798	 */
1799	list_add_tail(&th->node, &threads->dead);
1800
1801	/*
1802	 * We need to do the put here because if this is the last refcount,
1803	 * then we will be touching the threads->dead head when removing the
1804	 * thread.
1805	 */
1806	thread__put(th);
1807
1808	if (lock)
1809		up_write(&threads->lock);
1810}
1811
1812void machine__remove_thread(struct machine *machine, struct thread *th)
1813{
1814	return __machine__remove_thread(machine, th, true);
1815}
1816
1817int machine__process_fork_event(struct machine *machine, union perf_event *event,
1818				struct perf_sample *sample)
1819{
1820	struct thread *thread = machine__find_thread(machine,
1821						     event->fork.pid,
1822						     event->fork.tid);
1823	struct thread *parent = machine__findnew_thread(machine,
1824							event->fork.ppid,
1825							event->fork.ptid);
1826	bool do_maps_clone = true;
1827	int err = 0;
1828
1829	if (dump_trace)
1830		perf_event__fprintf_task(event, stdout);
1831
1832	/*
1833	 * There may be an existing thread that is not actually the parent,
1834	 * either because we are processing events out of order, or because the
1835	 * (fork) event that would have removed the thread was lost. Assume the
1836	 * latter case and continue on as best we can.
1837	 */
1838	if (parent->pid_ != (pid_t)event->fork.ppid) {
1839		dump_printf("removing erroneous parent thread %d/%d\n",
1840			    parent->pid_, parent->tid);
1841		machine__remove_thread(machine, parent);
1842		thread__put(parent);
1843		parent = machine__findnew_thread(machine, event->fork.ppid,
1844						 event->fork.ptid);
1845	}
1846
 1847	/* If a thread currently exists for the thread id, remove it */
1848	if (thread != NULL) {
1849		machine__remove_thread(machine, thread);
1850		thread__put(thread);
1851	}
1852
1853	thread = machine__findnew_thread(machine, event->fork.pid,
1854					 event->fork.tid);
1855	/*
1856	 * When synthesizing FORK events, we are trying to create thread
1857	 * objects for the already running tasks on the machine.
1858	 *
1859	 * Normally, for a kernel FORK event, we want to clone the parent's
1860	 * maps because that is what the kernel just did.
1861	 *
1862	 * But when synthesizing, this should not be done.  If we do, we end up
 1863	 * with overlapping maps as we process the synthesized MMAP2 events that
1864	 * get delivered shortly thereafter.
1865	 *
1866	 * Use the FORK event misc flags in an internal way to signal this
1867	 * situation, so we can elide the map clone when appropriate.
1868	 */
1869	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1870		do_maps_clone = false;
1871
1872	if (thread == NULL || parent == NULL ||
1873	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1874		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1875		err = -1;
1876	}
1877	thread__put(thread);
1878	thread__put(parent);
1879
1880	return err;
1881}
1882
1883int machine__process_exit_event(struct machine *machine, union perf_event *event,
1884				struct perf_sample *sample __maybe_unused)
1885{
1886	struct thread *thread = machine__find_thread(machine,
1887						     event->fork.pid,
1888						     event->fork.tid);
1889
1890	if (dump_trace)
1891		perf_event__fprintf_task(event, stdout);
1892
1893	if (thread != NULL) {
1894		thread__exited(thread);
1895		thread__put(thread);
1896	}
1897
1898	return 0;
1899}
1900
1901int machine__process_event(struct machine *machine, union perf_event *event,
1902			   struct perf_sample *sample)
1903{
1904	int ret;
1905
1906	switch (event->header.type) {
1907	case PERF_RECORD_COMM:
1908		ret = machine__process_comm_event(machine, event, sample); break;
1909	case PERF_RECORD_MMAP:
1910		ret = machine__process_mmap_event(machine, event, sample); break;
1911	case PERF_RECORD_NAMESPACES:
1912		ret = machine__process_namespaces_event(machine, event, sample); break;
1913	case PERF_RECORD_MMAP2:
1914		ret = machine__process_mmap2_event(machine, event, sample); break;
1915	case PERF_RECORD_FORK:
1916		ret = machine__process_fork_event(machine, event, sample); break;
1917	case PERF_RECORD_EXIT:
1918		ret = machine__process_exit_event(machine, event, sample); break;
1919	case PERF_RECORD_LOST:
1920		ret = machine__process_lost_event(machine, event, sample); break;
1921	case PERF_RECORD_AUX:
1922		ret = machine__process_aux_event(machine, event); break;
1923	case PERF_RECORD_ITRACE_START:
1924		ret = machine__process_itrace_start_event(machine, event); break;
1925	case PERF_RECORD_LOST_SAMPLES:
1926		ret = machine__process_lost_samples_event(machine, event, sample); break;
1927	case PERF_RECORD_SWITCH:
1928	case PERF_RECORD_SWITCH_CPU_WIDE:
1929		ret = machine__process_switch_event(machine, event); break;
1930	case PERF_RECORD_KSYMBOL:
1931		ret = machine__process_ksymbol(machine, event, sample); break;
1932	case PERF_RECORD_BPF_EVENT:
1933		ret = machine__process_bpf(machine, event, sample); break;
1934	default:
1935		ret = -1;
1936		break;
1937	}
1938
1939	return ret;
1940}
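/*
 * Usage sketch, for illustration (assumes event and sample were already
 * decoded by the session layer):
 *
 *	if (machine__process_event(machine, event, &sample) < 0)
 *		pr_debug("unhandled event type %d\n", event->header.type);
 */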
1941
1942static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1943{
1944	if (!regexec(regex, sym->name, 0, NULL, 0))
1945		return 1;
1946	return 0;
1947}
1948
1949static void ip__resolve_ams(struct thread *thread,
1950			    struct addr_map_symbol *ams,
1951			    u64 ip)
1952{
1953	struct addr_location al;
1954
1955	memset(&al, 0, sizeof(al));
1956	/*
1957	 * We cannot use the header.misc hint to determine whether a
1958	 * branch stack address is user, kernel, guest, hypervisor.
1959	 * Branches may straddle the kernel/user/hypervisor boundaries.
1960	 * Thus, we have to try consecutively until we find a match
 1961	 * or else the symbol is unknown.
1962	 */
1963	thread__find_cpumode_addr_location(thread, ip, &al);
1964
1965	ams->addr = ip;
1966	ams->al_addr = al.addr;
1967	ams->sym = al.sym;
1968	ams->map = al.map;
1969	ams->phys_addr = 0;
1970}
1971
1972static void ip__resolve_data(struct thread *thread,
1973			     u8 m, struct addr_map_symbol *ams,
1974			     u64 addr, u64 phys_addr)
1975{
1976	struct addr_location al;
1977
1978	memset(&al, 0, sizeof(al));
1979
1980	thread__find_symbol(thread, m, addr, &al);
1981
1982	ams->addr = addr;
1983	ams->al_addr = al.addr;
1984	ams->sym = al.sym;
1985	ams->map = al.map;
1986	ams->phys_addr = phys_addr;
1987}
1988
1989struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1990				     struct addr_location *al)
1991{
1992	struct mem_info *mi = mem_info__new();
1993
1994	if (!mi)
1995		return NULL;
1996
1997	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1998	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
1999			 sample->addr, sample->phys_addr);
2000	mi->data_src.val = sample->data_src;
2001
2002	return mi;
2003}
2004
2005static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
2006{
2007	char *srcline = NULL;
2008
2009	if (!map || callchain_param.key == CCKEY_FUNCTION)
2010		return srcline;
2011
2012	srcline = srcline__tree_find(&map->dso->srclines, ip);
2013	if (!srcline) {
2014		bool show_sym = false;
2015		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2016
2017		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2018				      sym, show_sym, show_addr, ip);
2019		srcline__tree_insert(&map->dso->srclines, ip, srcline);
2020	}
2021
2022	return srcline;
2023}
2024
2025struct iterations {
2026	int nr_loop_iter;
2027	u64 cycles;
2028};
2029
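/*
 * Resolve one callchain entry and append it to the cursor. Entries at or
 * above PERF_CONTEXT_MAX are context markers that switch *cpumode rather
 * than real addresses; an invalid marker resets the cursor. Returns
 * non-zero to stop further processing of the chain.
 */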
2030static int add_callchain_ip(struct thread *thread,
2031			    struct callchain_cursor *cursor,
2032			    struct symbol **parent,
2033			    struct addr_location *root_al,
2034			    u8 *cpumode,
2035			    u64 ip,
2036			    bool branch,
2037			    struct branch_flags *flags,
2038			    struct iterations *iter,
2039			    u64 branch_from)
2040{
2041	struct addr_location al;
2042	int nr_loop_iter = 0;
2043	u64 iter_cycles = 0;
2044	const char *srcline = NULL;
2045
2046	al.filtered = 0;
2047	al.sym = NULL;
2048	if (!cpumode) {
2049		thread__find_cpumode_addr_location(thread, ip, &al);
2050	} else {
2051		if (ip >= PERF_CONTEXT_MAX) {
2052			switch (ip) {
2053			case PERF_CONTEXT_HV:
2054				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2055				break;
2056			case PERF_CONTEXT_KERNEL:
2057				*cpumode = PERF_RECORD_MISC_KERNEL;
2058				break;
2059			case PERF_CONTEXT_USER:
2060				*cpumode = PERF_RECORD_MISC_USER;
2061				break;
2062			default:
2063				pr_debug("invalid callchain context: "
2064					 "%"PRId64"\n", (s64) ip);
2065				/*
2066				 * It seems the callchain is corrupted.
2067				 * Discard all.
2068				 */
2069				callchain_cursor_reset(cursor);
2070				return 1;
2071			}
2072			return 0;
2073		}
2074		thread__find_symbol(thread, *cpumode, ip, &al);
2075	}
2076
2077	if (al.sym != NULL) {
2078		if (perf_hpp_list.parent && !*parent &&
2079		    symbol__match_regex(al.sym, &parent_regex))
2080			*parent = al.sym;
2081		else if (have_ignore_callees && root_al &&
2082		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2083			/* Treat this symbol as the root,
2084			   forgetting its callees. */
2085			*root_al = al;
2086			callchain_cursor_reset(cursor);
2087		}
2088	}
2089
2090	if (symbol_conf.hide_unresolved && al.sym == NULL)
2091		return 0;
2092
2093	if (iter) {
2094		nr_loop_iter = iter->nr_loop_iter;
2095		iter_cycles = iter->cycles;
2096	}
2097
2098	srcline = callchain_srcline(al.map, al.sym, al.addr);
2099	return callchain_cursor_append(cursor, ip, al.map, al.sym,
2100				       branch, flags, nr_loop_iter,
2101				       iter_cycles, branch_from, srcline);
2102}
2103
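/*
 * Convert the raw branch stack of a sample into an array of branch_info
 * entries with the from/to addresses resolved to maps and symbols.
 */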
2104struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2105					   struct addr_location *al)
2106{
2107	unsigned int i;
2108	const struct branch_stack *bs = sample->branch_stack;
2109	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2110
2111	if (!bi)
2112		return NULL;
2113
2114	for (i = 0; i < bs->nr; i++) {
2115		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
2116		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
2117		bi[i].flags = bs->entries[i].flags;
2118	}
2119	return bi;
2120}
2121
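/*
 * Account one more collapsed loop iteration and sum the cycle counts of
 * its nr branch entries.
 */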
2122static void save_iterations(struct iterations *iter,
2123			    struct branch_entry *be, int nr)
2124{
2125	int i;
2126
2127	iter->nr_loop_iter++;
2128	iter->cycles = 0;
2129
2130	for (i = 0; i < nr; i++)
2131		iter->cycles += be[i].flags.cycles;
2132}
2133
2134#define CHASHSZ 127
2135#define CHASHBITS 7
2136#define NO_ENTRY 0xff
2137
2138#define PERF_MAX_BRANCH_DEPTH 127
2139
2140/*
 * Remove loops from the branch stack: hash each 'from' address into a
 * small table; when an address repeats and the entries in between form a
 * repeating sequence, collapse it, recording the iteration count and the
 * summed cycles in 'iter'. Returns the new number of entries.
 */
2141static int remove_loops(struct branch_entry *l, int nr,
2142			struct iterations *iter)
2143{
2144	int i, j, off;
2145	unsigned char chash[CHASHSZ];
2146
2147	memset(chash, NO_ENTRY, sizeof(chash));
2148
2149	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2150
2151	for (i = 0; i < nr; i++) {
2152		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2153
2154		/* no collision handling for now */
2155		if (chash[h] == NO_ENTRY) {
2156			chash[h] = i;
2157		} else if (l[chash[h]].from == l[i].from) {
2158			bool is_loop = true;
2159			/* check if it is a real loop */
2160			off = 0;
2161			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2162				if (l[j].from != l[i + off].from) {
2163					is_loop = false;
2164					break;
2165				}
2166			if (is_loop) {
2167				j = nr - (i + off);
2168				if (j > 0) {
2169					save_iterations(iter + i + off,
2170						l + i, off);
2171
2172					memmove(iter + i, iter + i + off,
2173						j * sizeof(*iter));
2174
2175					memmove(l + i, l + i + off,
2176						j * sizeof(*l));
2177				}
2178
2179				nr -= off;
2180			}
2181		}
2182	}
2183	return nr;
2184}
2185
2186/*
2187 * Resolve an LBR callstack chain sample.
2188 * Return:
2189 * 1 on success: LBR callchain information was retrieved
2190 * 0 if no LBR callchain information is available; the caller should try fp
2191 * negative error code on other errors.
2192 */
2193static int resolve_lbr_callchain_sample(struct thread *thread,
2194					struct callchain_cursor *cursor,
2195					struct perf_sample *sample,
2196					struct symbol **parent,
2197					struct addr_location *root_al,
2198					int max_stack)
2199{
2200	struct ip_callchain *chain = sample->callchain;
2201	int chain_nr = min(max_stack, (int)chain->nr), i;
2202	u8 cpumode = PERF_RECORD_MISC_USER;
2203	u64 ip, branch_from = 0;
2204
2205	for (i = 0; i < chain_nr; i++) {
2206		if (chain->ips[i] == PERF_CONTEXT_USER)
2207			break;
2208	}
2209
2210	/* LBR only affects the user callchain */
2211	if (i != chain_nr) {
2212		struct branch_stack *lbr_stack = sample->branch_stack;
2213		int lbr_nr = lbr_stack->nr, j, k;
2214		bool branch;
2215		struct branch_flags *flags;
2216		/*
2217		 * The LBR callstack covers only the user call chain, so
2218		 * mix_chain_nr is the kernel call chain length plus the
2219		 * LBR user call chain length:
2220		 * i entries of kernel call chain,
2221		 * 1 entry for PERF_CONTEXT_USER,
2222		 * lbr_nr + 1 entries of user call chain.
2223		 * E.g. i = 2, lbr_nr = 4: mix_chain_nr = 2 + 1 + 4 + 1 = 8.
2224		 * For details, see the comments in callchain__printf.
2225		 */
2226		int mix_chain_nr = i + 1 + lbr_nr + 1;
2227
2228		for (j = 0; j < mix_chain_nr; j++) {
2229			int err;
2230			branch = false;
2231			flags = NULL;
2232
2233			if (callchain_param.order == ORDER_CALLEE) {
2234				if (j < i + 1)
2235					ip = chain->ips[j];
2236				else if (j > i + 1) {
2237					k = j - i - 2;
2238					ip = lbr_stack->entries[k].from;
2239					branch = true;
2240					flags = &lbr_stack->entries[k].flags;
2241				} else {
2242					ip = lbr_stack->entries[0].to;
2243					branch = true;
2244					flags = &lbr_stack->entries[0].flags;
2245					branch_from =
2246						lbr_stack->entries[0].from;
2247				}
2248			} else {
2249				if (j < lbr_nr) {
2250					k = lbr_nr - j - 1;
2251					ip = lbr_stack->entries[k].from;
2252					branch = true;
2253					flags = &lbr_stack->entries[k].flags;
2254				} else if (j > lbr_nr)
2256					ip = chain->ips[i + 1 - (j - lbr_nr)];
2257				else {
2258					ip = lbr_stack->entries[0].to;
2259					branch = true;
2260					flags = &lbr_stack->entries[0].flags;
2261					branch_from =
2262						lbr_stack->entries[0].from;
2263				}
2264			}
2265
2266			err = add_callchain_ip(thread, cursor, parent,
2267					       root_al, &cpumode, ip,
2268					       branch, flags, NULL,
2269					       branch_from);
2270			if (err)
2271				return (err < 0) ? err : 0;
2272		}
2273		return 1;
2274	}
2275
2276	return 0;
2277}
2278
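/*
 * Scan backwards from entry 'ent' for the most recent PERF_CONTEXT_*
 * marker and feed it to add_callchain_ip() so that *cpumode reflects the
 * context the preceding entries were recorded in.
 */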
2279static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2280			     struct callchain_cursor *cursor,
2281			     struct symbol **parent,
2282			     struct addr_location *root_al,
2283			     u8 *cpumode, int ent)
2284{
2285	int err = 0;
2286
2287	while (--ent >= 0) {
2288		u64 ip = chain->ips[ent];
2289
2290		if (ip >= PERF_CONTEXT_MAX) {
2291			err = add_callchain_ip(thread, cursor, parent,
2292					       root_al, cpumode, ip,
2293					       false, NULL, NULL, 0);
2294			break;
2295		}
2296	}
2297	return err;
2298}
2299
2300static int thread__resolve_callchain_sample(struct thread *thread,
2301					    struct callchain_cursor *cursor,
2302					    struct evsel *evsel,
2303					    struct perf_sample *sample,
2304					    struct symbol **parent,
2305					    struct addr_location *root_al,
2306					    int max_stack)
2307{
2308	struct branch_stack *branch = sample->branch_stack;
2309	struct ip_callchain *chain = sample->callchain;
2310	int chain_nr = 0;
2311	u8 cpumode = PERF_RECORD_MISC_USER;
2312	int i, j, err, nr_entries;
2313	int skip_idx = -1;
2314	int first_call = 0;
2315
2316	if (chain)
2317		chain_nr = chain->nr;
2318
2319	if (perf_evsel__has_branch_callstack(evsel)) {
2320		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2321						   root_al, max_stack);
2322		if (err)
2323			return (err < 0) ? err : 0;
2324	}
2325
2326	/*
2327	 * Based on DWARF debug information, some architectures skip
2328	 * a callchain entry saved by the kernel.
2329	 */
2330	skip_idx = arch_skip_callchain_idx(thread, chain);
2331
2332	/*
2333	 * Add branches to call stack for easier browsing. This gives
2334	 * more context for a sample than just the callers.
2335	 *
2336	 * This uses individual histograms of paths compared to the
2337	 * aggregated histograms the normal LBR mode uses.
2338	 *
2339	 * Limitations for now:
2340	 * - No extra filters
2341	 * - No annotations (should annotate somehow)
2342	 */
2343
2344	if (branch && callchain_param.branch_callstack) {
2345		int nr = min(max_stack, (int)branch->nr);
2346		struct branch_entry be[nr];
2347		struct iterations iter[nr];
2348
2349		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2350			pr_warning("corrupted branch chain. skipping...\n");
2351			goto check_calls;
2352		}
2353
2354		for (i = 0; i < nr; i++) {
2355			if (callchain_param.order == ORDER_CALLEE) {
2356				be[i] = branch->entries[i];
2357
2358				if (chain == NULL)
2359					continue;
2360
2361				/*
2362				 * Check for overlap into the callchain.
2363				 * The return address is one off compared to
2364				 * the branch entry. To adjust for this,
2365				 * assume the calling instruction is no longer
2366				 * than 8 bytes.
2367				 */
2368				if (i == skip_idx ||
2369				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2370					first_call++;
2371				else if (be[i].from < chain->ips[first_call] &&
2372				    be[i].from >= chain->ips[first_call] - 8)
2373					first_call++;
2374			} else
2375				be[i] = branch->entries[branch->nr - i - 1];
2376		}
2377
2378		memset(iter, 0, sizeof(struct iterations) * nr);
2379		nr = remove_loops(be, nr, iter);
2380
2381		for (i = 0; i < nr; i++) {
2382			err = add_callchain_ip(thread, cursor, parent,
2383					       root_al,
2384					       NULL, be[i].to,
2385					       true, &be[i].flags,
2386					       NULL, be[i].from);
2387
2388			if (!err)
2389				err = add_callchain_ip(thread, cursor, parent, root_al,
2390						       NULL, be[i].from,
2391						       true, &be[i].flags,
2392						       &iter[i], 0);
2393			if (err == -EINVAL)
2394				break;
2395			if (err)
2396				return err;
2397		}
2398
2399		if (chain_nr == 0)
2400			return 0;
2401
2402		chain_nr -= nr;
2403	}
2404
2405check_calls:
2406	if (chain && callchain_param.order != ORDER_CALLEE) {
2407		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2408					&cpumode, chain->nr - first_call);
2409		if (err)
2410			return (err < 0) ? err : 0;
2411	}
2412	for (i = first_call, nr_entries = 0;
2413	     i < chain_nr && nr_entries < max_stack; i++) {
2414		u64 ip;
2415
2416		if (callchain_param.order == ORDER_CALLEE)
2417			j = i;
2418		else
2419			j = chain->nr - i - 1;
2420
2421#ifdef HAVE_SKIP_CALLCHAIN_IDX
2422		if (j == skip_idx)
2423			continue;
2424#endif
2425		ip = chain->ips[j];
2426		if (ip < PERF_CONTEXT_MAX)
2427			++nr_entries;
2428		else if (callchain_param.order != ORDER_CALLEE) {
2429			err = find_prev_cpumode(chain, thread, cursor, parent,
2430						root_al, &cpumode, j);
2431			if (err)
2432				return (err < 0) ? err : 0;
2433			continue;
2434		}
2435
2436		err = add_callchain_ip(thread, cursor, parent,
2437				       root_al, &cpumode, ip,
2438				       false, NULL, NULL, 0);
2439
2440		if (err)
2441			return (err < 0) ? err : 0;
2442	}
2443
2444	return 0;
2445}
2446
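/*
 * Expand inlined frames for 'ip': look up (or parse and cache) the inline
 * node for its address and append one cursor entry per inlined function.
 * Returns 0 when inline entries were appended, non-zero otherwise.
 */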
2447static int append_inlines(struct callchain_cursor *cursor,
2448			  struct map *map, struct symbol *sym, u64 ip)
2449{
2450	struct inline_node *inline_node;
2451	struct inline_list *ilist;
2452	u64 addr;
2453	int ret = 1;
2454
2455	if (!symbol_conf.inline_name || !map || !sym)
2456		return ret;
2457
2458	addr = map__map_ip(map, ip);
2459	addr = map__rip_2objdump(map, addr);
2460
2461	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2462	if (!inline_node) {
2463		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2464		if (!inline_node)
2465			return ret;
2466		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2467	}
2468
2469	list_for_each_entry(ilist, &inline_node->val, list) {
2470		ret = callchain_cursor_append(cursor, ip, map,
2471					      ilist->symbol, false,
2472					      NULL, 0, 0, 0, ilist->srcline);
2473
2474		if (ret != 0)
2475			return ret;
2476	}
2477
2478	return ret;
2479}
2480
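/*
 * Callback for unwind__get_entries(): append one unwound frame to the
 * callchain cursor, expanding inlines and resolving its source line.
 */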
2481static int unwind_entry(struct unwind_entry *entry, void *arg)
2482{
2483	struct callchain_cursor *cursor = arg;
2484	const char *srcline = NULL;
2485	u64 addr = entry->ip;
2486
2487	if (symbol_conf.hide_unresolved && entry->sym == NULL)
2488		return 0;
2489
2490	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2491		return 0;
2492
2493	/*
2494	 * Convert entry->ip from a virtual address to an offset in
2495	 * its corresponding binary.
2496	 */
2497	if (entry->map)
2498		addr = map__map_ip(entry->map, entry->ip);
2499
2500	srcline = callchain_srcline(entry->map, entry->sym, addr);
2501	return callchain_cursor_append(cursor, entry->ip,
2502				       entry->map, entry->sym,
2503				       false, NULL, 0, 0, 0, srcline);
2504}
2505
2506static int thread__resolve_callchain_unwind(struct thread *thread,
2507					    struct callchain_cursor *cursor,
2508					    struct evsel *evsel,
2509					    struct perf_sample *sample,
2510					    int max_stack)
2511{
2512	/* Can we do dwarf post unwind? */
2513	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2514	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2515		return 0;
2516
2517	/* Bail out if nothing was captured. */
2518	if ((!sample->user_regs.regs) ||
2519	    (!sample->user_stack.size))
2520		return 0;
2521
2522	return unwind__get_entries(unwind_entry, cursor,
2523				   thread, sample, max_stack);
2524}
2525
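/*
 * Resolve the full callchain of a sample, combining the kernel-supplied
 * chain with DWARF unwinding; callchain_param.order decides which of the
 * two passes runs first.
 */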
2526int thread__resolve_callchain(struct thread *thread,
2527			      struct callchain_cursor *cursor,
2528			      struct evsel *evsel,
2529			      struct perf_sample *sample,
2530			      struct symbol **parent,
2531			      struct addr_location *root_al,
2532			      int max_stack)
2533{
2534	int ret = 0;
2535
2536	callchain_cursor_reset(cursor);
2537
2538	if (callchain_param.order == ORDER_CALLEE) {
2539		ret = thread__resolve_callchain_sample(thread, cursor,
2540						       evsel, sample,
2541						       parent, root_al,
2542						       max_stack);
2543		if (ret)
2544			return ret;
2545		ret = thread__resolve_callchain_unwind(thread, cursor,
2546						       evsel, sample,
2547						       max_stack);
2548	} else {
2549		ret = thread__resolve_callchain_unwind(thread, cursor,
2550						       evsel, sample,
2551						       max_stack);
2552		if (ret)
2553			return ret;
2554		ret = thread__resolve_callchain_sample(thread, cursor,
2555						       evsel, sample,
2556						       parent, root_al,
2557						       max_stack);
2558	}
2559
2560	return ret;
2561}
2562
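/*
 * Iterate over all live and dead threads of a machine, stopping early if
 * the callback returns non-zero.
 */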
2563int machine__for_each_thread(struct machine *machine,
2564			     int (*fn)(struct thread *thread, void *p),
2565			     void *priv)
2566{
2567	struct threads *threads;
2568	struct rb_node *nd;
2569	struct thread *thread;
2570	int rc = 0;
2571	int i;
2572
2573	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2574		threads = &machine->threads[i];
2575		for (nd = rb_first_cached(&threads->entries); nd;
2576		     nd = rb_next(nd)) {
2577			thread = rb_entry(nd, struct thread, rb_node);
2578			rc = fn(thread, priv);
2579			if (rc != 0)
2580				return rc;
2581		}
2582
2583		list_for_each_entry(thread, &threads->dead, node) {
2584			rc = fn(thread, priv);
2585			if (rc != 0)
2586				return rc;
2587		}
2588	}
2589	return rc;
2590}
2591
2592int machines__for_each_thread(struct machines *machines,
2593			      int (*fn)(struct thread *thread, void *p),
2594			      void *priv)
2595{
2596	struct rb_node *nd;
2597	int rc = 0;
2598
2599	rc = machine__for_each_thread(&machines->host, fn, priv);
2600	if (rc != 0)
2601		return rc;
2602
2603	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
2604		struct machine *machine = rb_entry(nd, struct machine, rb_node);
2605
2606		rc = machine__for_each_thread(machine, fn, priv);
2607		if (rc != 0)
2608			return rc;
2609	}
2610	return rc;
2611}
2612
2613pid_t machine__get_current_tid(struct machine *machine, int cpu)
2614{
2615	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2616
2617	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
2618		return -1;
2619
2620	return machine->current_tid[cpu];
2621}
2622
2623int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2624			     pid_t tid)
2625{
2626	struct thread *thread;
2627	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2628
2629	if (cpu < 0)
2630		return -EINVAL;
2631
2632	if (!machine->current_tid) {
2633		int i;
2634
2635		machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
2636		if (!machine->current_tid)
2637			return -ENOMEM;
2638		for (i = 0; i < nr_cpus; i++)
2639			machine->current_tid[i] = -1;
2640	}
2641
2642	if (cpu >= nr_cpus) {
2643		pr_err("Requested CPU %d too large. ", cpu);
2644		pr_err("Consider raising MAX_NR_CPUS\n");
2645		return -EINVAL;
2646	}
2647
2648	machine->current_tid[cpu] = tid;
2649
2650	thread = machine__findnew_thread(machine, pid, tid);
2651	if (!thread)
2652		return -ENOMEM;
2653
2654	thread->cpu = cpu;
2655	thread__put(thread);
2656
2657	return 0;
2658}
2659
2660/*
2661 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2662 * normalized arch is needed.
2663 */
2664bool machine__is(struct machine *machine, const char *arch)
2665{
2666	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2667}
2668
2669int machine__nr_cpus_avail(struct machine *machine)
2670{
2671	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2672}
2673
2674int machine__get_kernel_start(struct machine *machine)
2675{
2676	struct map *map = machine__kernel_map(machine);
2677	int err = 0;
2678
2679	/*
2680	 * The only addresses above 2^63 are kernel addresses of a 64-bit
2681	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
2682	 * all addresses including kernel addresses are less than 2^32.  In
2683	 * that case (32-bit system), if the kernel mapping is unknown, all
2684	 * addresses will be assumed to be in user space - see
2685	 * machine__kernel_ip().
2686	 */
2687	machine->kernel_start = 1ULL << 63;
2688	if (map) {
2689		err = map__load(map);
2690		/*
2691		 * On x86_64, PTI entry trampolines are less than the
2692		 * start of kernel text, but still above 2^63. So leave
2693		 * kernel_start = 1ULL << 63 for x86_64.
2694		 */
2695		if (!err && !machine__is(machine, "x86_64"))
2696			machine->kernel_start = map->start;
2697	}
2698	return err;
2699}
2700
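/*
 * On machines with a single address space the recorded cpumode may not
 * match a referenced address: re-derive it from the address itself while
 * preserving the host/guest distinction.
 */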
2701u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
2702{
2703	u8 addr_cpumode = cpumode;
2704	bool kernel_ip;
2705
2706	if (!machine->single_address_space)
2707		goto out;
2708
2709	kernel_ip = machine__kernel_ip(machine, addr);
2710	switch (cpumode) {
2711	case PERF_RECORD_MISC_KERNEL:
2712	case PERF_RECORD_MISC_USER:
2713		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
2714					   PERF_RECORD_MISC_USER;
2715		break;
2716	case PERF_RECORD_MISC_GUEST_KERNEL:
2717	case PERF_RECORD_MISC_GUEST_USER:
2718		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
2719					   PERF_RECORD_MISC_GUEST_USER;
2720		break;
2721	default:
2722		break;
2723	}
2724out:
2725	return addr_cpumode;
2726}
2727
2728struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2729{
2730	return dsos__findnew(&machine->dsos, filename);
2731}
2732
2733char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2734{
2735	struct machine *machine = vmachine;
2736	struct map *map;
2737	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
2738
2739	if (sym == NULL)
2740		return NULL;
2741
2742	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2743	*addrp = map->unmap_ip(map, sym->start);
2744	return sym->name;
2745}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2#include <dirent.h>
   3#include <errno.h>
   4#include <inttypes.h>
   5#include <regex.h>
   6#include <stdlib.h>
   7#include "callchain.h"
   8#include "debug.h"
   9#include "dso.h"
  10#include "env.h"
  11#include "event.h"
  12#include "evsel.h"
  13#include "hist.h"
  14#include "machine.h"
  15#include "map.h"
  16#include "map_symbol.h"
  17#include "branch.h"
  18#include "mem-events.h"
  19#include "srcline.h"
  20#include "symbol.h"
  21#include "sort.h"
  22#include "strlist.h"
  23#include "target.h"
  24#include "thread.h"
  25#include "util.h"
  26#include "vdso.h"
  27#include <stdbool.h>
  28#include <sys/types.h>
  29#include <sys/stat.h>
  30#include <unistd.h>
  31#include "unwind.h"
  32#include "linux/hash.h"
  33#include "asm/bug.h"
  34#include "bpf-event.h"
  35#include <internal/lib.h> // page_size
  36#include "cgroup.h"
  37
  38#include <linux/ctype.h>
  39#include <symbol/kallsyms.h>
  40#include <linux/mman.h>
  41#include <linux/string.h>
  42#include <linux/zalloc.h>
  43
  44static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
  45
  46static struct dso *machine__kernel_dso(struct machine *machine)
  47{
  48	return machine->vmlinux_map->dso;
  49}
  50
  51static void dsos__init(struct dsos *dsos)
  52{
  53	INIT_LIST_HEAD(&dsos->head);
  54	dsos->root = RB_ROOT;
  55	init_rwsem(&dsos->lock);
  56}
  57
  58static void machine__threads_init(struct machine *machine)
  59{
  60	int i;
  61
  62	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
  63		struct threads *threads = &machine->threads[i];
  64		threads->entries = RB_ROOT_CACHED;
  65		init_rwsem(&threads->lock);
  66		threads->nr = 0;
  67		INIT_LIST_HEAD(&threads->dead);
  68		threads->last_match = NULL;
  69	}
  70}
  71
  72static int machine__set_mmap_name(struct machine *machine)
  73{
  74	if (machine__is_host(machine))
  75		machine->mmap_name = strdup("[kernel.kallsyms]");
  76	else if (machine__is_default_guest(machine))
  77		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
  78	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
  79			  machine->pid) < 0)
  80		machine->mmap_name = NULL;
  81
  82	return machine->mmap_name ? 0 : -ENOMEM;
  83}
  84
  85int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
  86{
  87	int err = -ENOMEM;
  88
  89	memset(machine, 0, sizeof(*machine));
  90	maps__init(&machine->kmaps, machine);
  91	RB_CLEAR_NODE(&machine->rb_node);
  92	dsos__init(&machine->dsos);
  93
  94	machine__threads_init(machine);
  95
  96	machine->vdso_info = NULL;
  97	machine->env = NULL;
  98
  99	machine->pid = pid;
 100
 101	machine->id_hdr_size = 0;
 102	machine->kptr_restrict_warned = false;
 103	machine->comm_exec = false;
 104	machine->kernel_start = 0;
 105	machine->vmlinux_map = NULL;
 106
 107	machine->root_dir = strdup(root_dir);
 108	if (machine->root_dir == NULL)
 109		return -ENOMEM;
 110
 111	if (machine__set_mmap_name(machine))
 112		goto out;
 113
 114	if (pid != HOST_KERNEL_ID) {
 115		struct thread *thread = machine__findnew_thread(machine, -1,
 116								pid);
 117		char comm[64];
 118
 119		if (thread == NULL)
 120			goto out;
 121
 122		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
 123		thread__set_comm(thread, comm, 0);
 124		thread__put(thread);
 125	}
 126
 127	machine->current_tid = NULL;
 128	err = 0;
 129
 130out:
 131	if (err) {
 132		zfree(&machine->root_dir);
 133		zfree(&machine->mmap_name);
 134	}
 135	return 0;
 136}
 137
 138struct machine *machine__new_host(void)
 139{
 140	struct machine *machine = malloc(sizeof(*machine));
 141
 142	if (machine != NULL) {
 143		machine__init(machine, "", HOST_KERNEL_ID);
 144
 145		if (machine__create_kernel_maps(machine) < 0)
 146			goto out_delete;
 147	}
 148
 149	return machine;
 150out_delete:
 151	free(machine);
 152	return NULL;
 153}
 154
 155struct machine *machine__new_kallsyms(void)
 156{
 157	struct machine *machine = machine__new_host();
 158	/*
 159	 * FIXME:
 160	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
 161	 *    ask for not using the kcore parsing code, once this one is fixed
 162	 *    to create a map per module.
 163	 */
 164	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
 165		machine__delete(machine);
 166		machine = NULL;
 167	}
 168
 169	return machine;
 170}
 171
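/*
 * Drop every DSO from the list, clearing its rb-tree linkage and
 * releasing the list's reference.
 */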
 172static void dsos__purge(struct dsos *dsos)
 173{
 174	struct dso *pos, *n;
 175
 176	down_write(&dsos->lock);
 177
 178	list_for_each_entry_safe(pos, n, &dsos->head, node) {
 179		RB_CLEAR_NODE(&pos->rb_node);
 180		pos->root = NULL;
 181		list_del_init(&pos->node);
 182		dso__put(pos);
 183	}
 184
 185	up_write(&dsos->lock);
 186}
 187
 188static void dsos__exit(struct dsos *dsos)
 189{
 190	dsos__purge(dsos);
 191	exit_rwsem(&dsos->lock);
 192}
 193
 194void machine__delete_threads(struct machine *machine)
 195{
 196	struct rb_node *nd;
 197	int i;
 198
 199	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 200		struct threads *threads = &machine->threads[i];
 201		down_write(&threads->lock);
 202		nd = rb_first_cached(&threads->entries);
 203		while (nd) {
 204			struct thread *t = rb_entry(nd, struct thread, rb_node);
 205
 206			nd = rb_next(nd);
 207			__machine__remove_thread(machine, t, false);
 208		}
 209		up_write(&threads->lock);
 210	}
 211}
 212
 213void machine__exit(struct machine *machine)
 214{
 215	int i;
 216
 217	if (machine == NULL)
 218		return;
 219
 220	machine__destroy_kernel_maps(machine);
 221	maps__exit(&machine->kmaps);
 222	dsos__exit(&machine->dsos);
 223	machine__exit_vdso(machine);
 224	zfree(&machine->root_dir);
 225	zfree(&machine->mmap_name);
 226	zfree(&machine->current_tid);
 227
 228	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 229		struct threads *threads = &machine->threads[i];
 230		struct thread *thread, *n;
 231		/*
 232		 * Forget about the dead: at this point whatever threads were
 233		 * left in the dead lists had better have a reference count taken
 234		 * by whoever is using them. When those references are dropped
 235		 * and the count finally hits zero, thread__put() will see that
 236		 * the thread is not in the dead threads list, will not try to
 237		 * remove it from there, and will just call thread__delete().
 238		 */
 239		list_for_each_entry_safe(thread, n, &threads->dead, node)
 240			list_del_init(&thread->node);
 241
 242		exit_rwsem(&threads->lock);
 243	}
 244}
 245
 246void machine__delete(struct machine *machine)
 247{
 248	if (machine) {
 249		machine__exit(machine);
 250		free(machine);
 251	}
 252}
 253
 254void machines__init(struct machines *machines)
 255{
 256	machine__init(&machines->host, "", HOST_KERNEL_ID);
 257	machines->guests = RB_ROOT_CACHED;
 258}
 259
 260void machines__exit(struct machines *machines)
 261{
 262	machine__exit(&machines->host);
 263	/* XXX exit guest */
 264}
 265
 266struct machine *machines__add(struct machines *machines, pid_t pid,
 267			      const char *root_dir)
 268{
 269	struct rb_node **p = &machines->guests.rb_root.rb_node;
 270	struct rb_node *parent = NULL;
 271	struct machine *pos, *machine = malloc(sizeof(*machine));
 272	bool leftmost = true;
 273
 274	if (machine == NULL)
 275		return NULL;
 276
 277	if (machine__init(machine, root_dir, pid) != 0) {
 278		free(machine);
 279		return NULL;
 280	}
 281
 282	while (*p != NULL) {
 283		parent = *p;
 284		pos = rb_entry(parent, struct machine, rb_node);
 285		if (pid < pos->pid)
 286			p = &(*p)->rb_left;
 287		else {
 288			p = &(*p)->rb_right;
 289			leftmost = false;
 290		}
 291	}
 292
 293	rb_link_node(&machine->rb_node, parent, p);
 294	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
 295
 296	return machine;
 297}
 298
 299void machines__set_comm_exec(struct machines *machines, bool comm_exec)
 300{
 301	struct rb_node *nd;
 302
 303	machines->host.comm_exec = comm_exec;
 304
 305	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 306		struct machine *machine = rb_entry(nd, struct machine, rb_node);
 307
 308		machine->comm_exec = comm_exec;
 309	}
 310}
 311
 312struct machine *machines__find(struct machines *machines, pid_t pid)
 313{
 314	struct rb_node **p = &machines->guests.rb_root.rb_node;
 315	struct rb_node *parent = NULL;
 316	struct machine *machine;
 317	struct machine *default_machine = NULL;
 318
 319	if (pid == HOST_KERNEL_ID)
 320		return &machines->host;
 321
 322	while (*p != NULL) {
 323		parent = *p;
 324		machine = rb_entry(parent, struct machine, rb_node);
 325		if (pid < machine->pid)
 326			p = &(*p)->rb_left;
 327		else if (pid > machine->pid)
 328			p = &(*p)->rb_right;
 329		else
 330			return machine;
 331		if (!machine->pid)
 332			default_machine = machine;
 333	}
 334
 335	return default_machine;
 336}
 337
 338struct machine *machines__findnew(struct machines *machines, pid_t pid)
 339{
 340	char path[PATH_MAX];
 341	const char *root_dir = "";
 342	struct machine *machine = machines__find(machines, pid);
 343
 344	if (machine && (machine->pid == pid))
 345		goto out;
 346
 347	if ((pid != HOST_KERNEL_ID) &&
 348	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
 349	    (symbol_conf.guestmount)) {
 350		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
 351		if (access(path, R_OK)) {
 352			static struct strlist *seen;
 353
 354			if (!seen)
 355				seen = strlist__new(NULL, NULL);
 356
 357			if (!strlist__has_entry(seen, path)) {
 358				pr_err("Can't access file %s\n", path);
 359				strlist__add(seen, path);
 360			}
 361			machine = NULL;
 362			goto out;
 363		}
 364		root_dir = path;
 365	}
 366
 367	machine = machines__add(machines, pid, root_dir);
 368out:
 369	return machine;
 370}
 371
 372void machines__process_guests(struct machines *machines,
 373			      machine__process_t process, void *data)
 374{
 375	struct rb_node *nd;
 376
 377	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 378		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 379		process(pos, data);
 380	}
 381}
 382
 383void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 384{
 385	struct rb_node *node;
 386	struct machine *machine;
 387
 388	machines->host.id_hdr_size = id_hdr_size;
 389
 390	for (node = rb_first_cached(&machines->guests); node;
 391	     node = rb_next(node)) {
 392		machine = rb_entry(node, struct machine, rb_node);
 393		machine->id_hdr_size = id_hdr_size;
 394	}
 397}
 398
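/*
 * A thread's pid may only become known after it was created: record the
 * pid and share the thread group leader's maps with the thread.
 */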
 399static void machine__update_thread_pid(struct machine *machine,
 400				       struct thread *th, pid_t pid)
 401{
 402	struct thread *leader;
 403
 404	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
 405		return;
 406
 407	th->pid_ = pid;
 408
 409	if (th->pid_ == th->tid)
 410		return;
 411
 412	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
 413	if (!leader)
 414		goto out_err;
 415
 416	if (!leader->maps)
 417		leader->maps = maps__new(machine);
 418
 419	if (!leader->maps)
 420		goto out_err;
 421
 422	if (th->maps == leader->maps)
 423		return;
 424
 425	if (th->maps) {
 426		/*
 427		 * Maps are created from MMAP events which provide the pid and
 428		 * tid.  Consequently there never should be any maps on a thread
 429		 * with an unknown pid.  Just print an error if there are.
 430		 */
 431		if (!maps__empty(th->maps))
 432			pr_err("Discarding thread maps for %d:%d\n",
 433			       th->pid_, th->tid);
 434		maps__put(th->maps);
 435	}
 436
 437	th->maps = maps__get(leader->maps);
 438out_put:
 439	thread__put(leader);
 440	return;
 441out_err:
 442	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
 443	goto out_put;
 444}
 445
 446/*
 447 * Front-end cache - TID lookups come in blocks,
 448 * so most of the time we don't have to look up
 449 * the full rbtree:
 450 */
 451static struct thread*
 452__threads__get_last_match(struct threads *threads, struct machine *machine,
 453			  int pid, int tid)
 454{
 455	struct thread *th;
 456
 457	th = threads->last_match;
 458	if (th != NULL) {
 459		if (th->tid == tid) {
 460			machine__update_thread_pid(machine, th, pid);
 461			return thread__get(th);
 462		}
 463
 464		threads->last_match = NULL;
 465	}
 466
 467	return NULL;
 468}
 469
 470static struct thread*
 471threads__get_last_match(struct threads *threads, struct machine *machine,
 472			int pid, int tid)
 473{
 474	struct thread *th = NULL;
 475
 476	if (perf_singlethreaded)
 477		th = __threads__get_last_match(threads, machine, pid, tid);
 478
 479	return th;
 480}
 481
 482static void
 483__threads__set_last_match(struct threads *threads, struct thread *th)
 484{
 485	threads->last_match = th;
 486}
 487
 488static void
 489threads__set_last_match(struct threads *threads, struct thread *th)
 490{
 491	if (perf_singlethreaded)
 492		__threads__set_last_match(threads, th);
 493}
 494
 495/*
 496 * Caller must eventually drop thread->refcnt returned with a successful
 497 * lookup/new thread inserted.
 498 */
 499static struct thread *____machine__findnew_thread(struct machine *machine,
 500						  struct threads *threads,
 501						  pid_t pid, pid_t tid,
 502						  bool create)
 503{
 504	struct rb_node **p = &threads->entries.rb_root.rb_node;
 505	struct rb_node *parent = NULL;
 506	struct thread *th;
 507	bool leftmost = true;
 508
 509	th = threads__get_last_match(threads, machine, pid, tid);
 510	if (th)
 511		return th;
 512
 513	while (*p != NULL) {
 514		parent = *p;
 515		th = rb_entry(parent, struct thread, rb_node);
 516
 517		if (th->tid == tid) {
 518			threads__set_last_match(threads, th);
 519			machine__update_thread_pid(machine, th, pid);
 520			return thread__get(th);
 521		}
 522
 523		if (tid < th->tid)
 524			p = &(*p)->rb_left;
 525		else {
 526			p = &(*p)->rb_right;
 527			leftmost = false;
 528		}
 529	}
 530
 531	if (!create)
 532		return NULL;
 533
 534	th = thread__new(pid, tid);
 535	if (th != NULL) {
 536		rb_link_node(&th->rb_node, parent, p);
 537		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
 538
 539		/*
 540		 * We have to initialize maps separately after rb tree is updated.
 541		 *
 542		 * The reason is that we call machine__findnew_thread
 543		 * within thread__init_maps to find the thread
 544		 * leader and that would screw up the rb tree.
 545		 */
 546		if (thread__init_maps(th, machine)) {
 547			rb_erase_cached(&th->rb_node, &threads->entries);
 548			RB_CLEAR_NODE(&th->rb_node);
 549			thread__put(th);
 550			return NULL;
 551		}
 552		/*
 553		 * It is now in the rbtree, get a ref
 554		 */
 555		thread__get(th);
 556		threads__set_last_match(threads, th);
 557		++threads->nr;
 558	}
 559
 560	return th;
 561}
 562
 563struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
 564{
 565	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
 566}
 567
 568struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
 569				       pid_t tid)
 570{
 571	struct threads *threads = machine__threads(machine, tid);
 572	struct thread *th;
 573
 574	down_write(&threads->lock);
 575	th = __machine__findnew_thread(machine, pid, tid);
 576	up_write(&threads->lock);
 577	return th;
 578}
 579
 580struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 581				    pid_t tid)
 582{
 583	struct threads *threads = machine__threads(machine, tid);
 584	struct thread *th;
 585
 586	down_read(&threads->lock);
 587	th =  ____machine__findnew_thread(machine, threads, pid, tid, false);
 588	up_read(&threads->lock);
 589	return th;
 590}
 591
 592struct comm *machine__thread_exec_comm(struct machine *machine,
 593				       struct thread *thread)
 594{
 595	if (machine->comm_exec)
 596		return thread__exec_comm(thread);
 597	else
 598		return thread__comm(thread);
 599}
 600
 601int machine__process_comm_event(struct machine *machine, union perf_event *event,
 602				struct perf_sample *sample)
 603{
 604	struct thread *thread = machine__findnew_thread(machine,
 605							event->comm.pid,
 606							event->comm.tid);
 607	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
 608	int err = 0;
 609
 610	if (exec)
 611		machine->comm_exec = true;
 612
 613	if (dump_trace)
 614		perf_event__fprintf_comm(event, stdout);
 615
 616	if (thread == NULL ||
 617	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
 618		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 619		err = -1;
 620	}
 621
 622	thread__put(thread);
 623
 624	return err;
 625}
 626
 627int machine__process_namespaces_event(struct machine *machine,
 628				      union perf_event *event,
 629				      struct perf_sample *sample)
 630{
 631	struct thread *thread = machine__findnew_thread(machine,
 632							event->namespaces.pid,
 633							event->namespaces.tid);
 634	int err = 0;
 635
 636	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
 637		  "\nWARNING: kernel seems to support more namespaces than perf"
 638		  " tool.\nTry updating the perf tool...\n\n");
 639
 640	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
 641		  "\nWARNING: perf tool seems to support more namespaces than"
 642		  " the kernel.\nTry updating the kernel...\n\n");
 643
 644	if (dump_trace)
 645		perf_event__fprintf_namespaces(event, stdout);
 646
 647	if (thread == NULL ||
 648	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
 649		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
 650		err = -1;
 651	}
 652
 653	thread__put(thread);
 654
 655	return err;
 656}
 657
 658int machine__process_cgroup_event(struct machine *machine,
 659				  union perf_event *event,
 660				  struct perf_sample *sample __maybe_unused)
 661{
 662	struct cgroup *cgrp;
 663
 664	if (dump_trace)
 665		perf_event__fprintf_cgroup(event, stdout);
 666
 667	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
 668	if (cgrp == NULL)
 669		return -ENOMEM;
 670
 671	return 0;
 672}
 673
 674int machine__process_lost_event(struct machine *machine __maybe_unused,
 675				union perf_event *event, struct perf_sample *sample __maybe_unused)
 676{
 677	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
 678		    event->lost.id, event->lost.lost);
 679	return 0;
 680}
 681
 682int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
 683					union perf_event *event, struct perf_sample *sample)
 684{
 685	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
 686		    sample->id, event->lost_samples.lost);
 687	return 0;
 688}
 689
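/*
 * Find or create the DSO for a kernel module under the dsos lock; on
 * creation, fill in the module info and take the long name from
 * 'filename'. Returns a reference the caller must drop with dso__put().
 */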
 690static struct dso *machine__findnew_module_dso(struct machine *machine,
 691					       struct kmod_path *m,
 692					       const char *filename)
 693{
 694	struct dso *dso;
 695
 696	down_write(&machine->dsos.lock);
 697
 698	dso = __dsos__find(&machine->dsos, m->name, true);
 699	if (!dso) {
 700		dso = __dsos__addnew(&machine->dsos, m->name);
 701		if (dso == NULL)
 702			goto out_unlock;
 703
 704		dso__set_module_info(dso, m, machine);
 705		dso__set_long_name(dso, strdup(filename), true);
 706		dso->kernel = DSO_SPACE__KERNEL;
 707	}
 708
 709	dso__get(dso);
 710out_unlock:
 711	up_write(&machine->dsos.lock);
 712	return dso;
 713}
 714
 715int machine__process_aux_event(struct machine *machine __maybe_unused,
 716			       union perf_event *event)
 717{
 718	if (dump_trace)
 719		perf_event__fprintf_aux(event, stdout);
 720	return 0;
 721}
 722
 723int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
 724					union perf_event *event)
 725{
 726	if (dump_trace)
 727		perf_event__fprintf_itrace_start(event, stdout);
 728	return 0;
 729}
 730
 731int machine__process_switch_event(struct machine *machine __maybe_unused,
 732				  union perf_event *event)
 733{
 734	if (dump_trace)
 735		perf_event__fprintf_switch(event, stdout);
 736	return 0;
 737}
 738
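/*
 * Handle a ksymbol registration: if no kernel map covers the address,
 * create a DSO and map for it (e.g. for BPF programs), then insert the
 * new symbol into the map's DSO.
 */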
 739static int machine__process_ksymbol_register(struct machine *machine,
 740					     union perf_event *event,
 741					     struct perf_sample *sample __maybe_unused)
 742{
 743	struct symbol *sym;
 744	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
 745
 746	if (!map) {
 747		struct dso *dso = dso__new(event->ksymbol.name);
 748
 749		if (dso) {
 750			dso->kernel = DSO_SPACE__KERNEL;
 751			map = map__new2(0, dso);
 752		}
 753
 754		if (!dso || !map) {
 755			dso__put(dso);
 756			return -ENOMEM;
 757		}
 758
 759		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
 760			map->dso->binary_type = DSO_BINARY_TYPE__OOL;
 761			map->dso->data.file_size = event->ksymbol.len;
 762			dso__set_loaded(map->dso);
 763		}
 764
 765		map->start = event->ksymbol.addr;
 766		map->end = map->start + event->ksymbol.len;
 767		maps__insert(&machine->kmaps, map);
 768		dso__set_loaded(dso);
 769
 770		if (is_bpf_image(event->ksymbol.name)) {
 771			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
 772			dso__set_long_name(dso, "", false);
 773		}
 774	}
 775
 776	sym = symbol__new(map->map_ip(map, map->start),
 777			  event->ksymbol.len,
 778			  0, 0, event->ksymbol.name);
 779	if (!sym)
 780		return -ENOMEM;
 781	dso__insert_symbol(map->dso, sym);
 782	return 0;
 783}
 784
 785static int machine__process_ksymbol_unregister(struct machine *machine,
 786					       union perf_event *event,
 787					       struct perf_sample *sample __maybe_unused)
 788{
 789	struct map *map;
 790
 791	map = maps__find(&machine->kmaps, event->ksymbol.addr);
 792	if (map)
 793		maps__remove(&machine->kmaps, map);
 794
 795	return 0;
 796}
 797
 798int machine__process_ksymbol(struct machine *machine,
 799			     union perf_event *event,
 800			     struct perf_sample *sample)
 801{
 802	if (dump_trace)
 803		perf_event__fprintf_ksymbol(event, stdout);
 804
 805	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
 806		return machine__process_ksymbol_unregister(machine, event,
 807							   sample);
 808	return machine__process_ksymbol_register(machine, event, sample);
 809}
 810
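/*
 * Handle a text poke event: write the new instruction bytes into the
 * DSO data cache so that later disassembly sees the patched kernel text.
 */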
 811int machine__process_text_poke(struct machine *machine, union perf_event *event,
 812			       struct perf_sample *sample __maybe_unused)
 813{
 814	struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
 815	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 816
 817	if (dump_trace)
 818		perf_event__fprintf_text_poke(event, machine, stdout);
 819
 820	if (!event->text_poke.new_len)
 821		return 0;
 822
 823	if (cpumode != PERF_RECORD_MISC_KERNEL) {
 824		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
 825		return 0;
 826	}
 827
 828	if (map && map->dso) {
 829		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
 830		int ret;
 831
 832		/*
 833		 * Kernel maps might be changed when loading symbols so loading
 834		 * must be done prior to using kernel maps.
 835		 */
 836		map__load(map);
 837		ret = dso__data_write_cache_addr(map->dso, map, machine,
 838						 event->text_poke.addr,
 839						 new_bytes,
 840						 event->text_poke.new_len);
 841		if (ret != event->text_poke.new_len)
 842			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
 843				 event->text_poke.addr);
 844	} else {
 845		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
 846			 event->text_poke.addr);
 847	}
 848
 849	return 0;
 850}
 851
 852static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
 853					      const char *filename)
 854{
 855	struct map *map = NULL;
 856	struct kmod_path m;
 857	struct dso *dso;
 858
 859	if (kmod_path__parse_name(&m, filename))
 860		return NULL;
 861
 862	dso = machine__findnew_module_dso(machine, &m, filename);
 863	if (dso == NULL)
 864		goto out;
 865
 866	map = map__new2(start, dso);
 867	if (map == NULL)
 868		goto out;
 869
 870	maps__insert(&machine->kmaps, map);
 871
 872	/* Put the map here because maps__insert already got it */
 873	map__put(map);
 874out:
 875	/* Put the dso here, corresponding to machine__findnew_module_dso() */
 876	dso__put(dso);
 877	zfree(&m.name);
 878	return map;
 879}
 880
 881size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 882{
 883	struct rb_node *nd;
 884	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
 885
 886	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 887		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 888		ret += __dsos__fprintf(&pos->dsos.head, fp);
 889	}
 890
 891	return ret;
 892}
 893
 894size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
 895				     bool (skip)(struct dso *dso, int parm), int parm)
 896{
 897	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
 898}
 899
 900size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
 901				     bool (skip)(struct dso *dso, int parm), int parm)
 902{
 903	struct rb_node *nd;
 904	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
 905
 906	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
 907		struct machine *pos = rb_entry(nd, struct machine, rb_node);
 908		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
 909	}
 910	return ret;
 911}
 912
 913size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
 914{
 915	int i;
 916	size_t printed = 0;
 917	struct dso *kdso = machine__kernel_dso(machine);
 918
 919	if (kdso->has_build_id) {
 920		char filename[PATH_MAX];
 921		if (dso__build_id_filename(kdso, filename, sizeof(filename),
 922					   false))
 923			printed += fprintf(fp, "[0] %s\n", filename);
 924	}
 925
 926	for (i = 0; i < vmlinux_path__nr_entries; ++i)
 927		printed += fprintf(fp, "[%d] %s\n",
 928				   i + kdso->has_build_id, vmlinux_path[i]);
 929
 930	return printed;
 931}
 932
 933size_t machine__fprintf(struct machine *machine, FILE *fp)
 934{
 935	struct rb_node *nd;
 936	size_t ret = 0;
 937	int i;
 938
 939	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
 940		struct threads *threads = &machine->threads[i];
 941
 942		down_read(&threads->lock);
 943
 944	ret += fprintf(fp, "Threads: %u\n", threads->nr);
 945
 946		for (nd = rb_first_cached(&threads->entries); nd;
 947		     nd = rb_next(nd)) {
 948			struct thread *pos = rb_entry(nd, struct thread, rb_node);
 949
 950			ret += thread__fprintf(pos, fp);
 951		}
 952
 953		up_read(&threads->lock);
 954	}
 955	return ret;
 956}
 957
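/*
 * Find or create the DSO representing the host or guest kernel, honouring
 * a user-specified vmlinux name, and read its build-id if not yet known.
 */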
 958static struct dso *machine__get_kernel(struct machine *machine)
 959{
 960	const char *vmlinux_name = machine->mmap_name;
 961	struct dso *kernel;
 962
 963	if (machine__is_host(machine)) {
 964		if (symbol_conf.vmlinux_name)
 965			vmlinux_name = symbol_conf.vmlinux_name;
 966
 967		kernel = machine__findnew_kernel(machine, vmlinux_name,
 968						 "[kernel]", DSO_SPACE__KERNEL);
 969	} else {
 970		if (symbol_conf.default_guest_vmlinux_name)
 971			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
 972
 973		kernel = machine__findnew_kernel(machine, vmlinux_name,
 974						 "[guest.kernel]",
 975						 DSO_SPACE__KERNEL_GUEST);
 976	}
 977
 978	if (kernel != NULL && (!kernel->has_build_id))
 979		dso__read_running_kernel_build_id(kernel, machine);
 980
 981	return kernel;
 982}
 983
 984struct process_args {
 985	u64 start;
 986};
 987
 988void machine__get_kallsyms_filename(struct machine *machine, char *buf,
 989				    size_t bufsz)
 990{
 991	if (machine__is_default_guest(machine))
 992		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
 993	else
 994		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 995}
 996
 997const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
 998
 999/* Figure out the start address of the kernel map from /proc/kallsyms.
1000 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1001 * symbol_name if it's not that important.
1002 */
1003static int machine__get_running_kernel_start(struct machine *machine,
1004					     const char **symbol_name,
1005					     u64 *start, u64 *end)
1006{
1007	char filename[PATH_MAX];
1008	int i, err = -1;
1009	const char *name;
1010	u64 addr = 0;
1011
1012	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1013
1014	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1015		return 0;
1016
1017	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1018		err = kallsyms__get_function_start(filename, name, &addr);
1019		if (!err)
1020			break;
1021	}
1022
1023	if (err)
1024		return -1;
1025
1026	if (symbol_name)
1027		*symbol_name = name;
1028
1029	*start = addr;
1030
1031	err = kallsyms__get_function_start(filename, "_etext", &addr);
1032	if (!err)
1033		*end = addr;
1034
1035	return 0;
1036}
1037
1038int machine__create_extra_kernel_map(struct machine *machine,
1039				     struct dso *kernel,
1040				     struct extra_kernel_map *xm)
1041{
1042	struct kmap *kmap;
1043	struct map *map;
1044
1045	map = map__new2(xm->start, kernel);
1046	if (!map)
1047		return -1;
1048
1049	map->end   = xm->end;
1050	map->pgoff = xm->pgoff;
1051
1052	kmap = map__kmap(map);
1053
1054	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1055
1056	maps__insert(&machine->kmaps, map);
1057
1058	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1059		  kmap->name, map->start, map->end);
1060
1061	map__put(map);
1062
1063	return 0;
1064}
1065
1066static u64 find_entry_trampoline(struct dso *dso)
1067{
1068	/* Duplicates are removed so lookup all aliases */
1069	const char *syms[] = {
1070		"_entry_trampoline",
1071		"__entry_trampoline_start",
1072		"entry_SYSCALL_64_trampoline",
1073	};
1074	struct symbol *sym = dso__first_symbol(dso);
1075	unsigned int i;
1076
1077	for (; sym; sym = dso__next_symbol(sym)) {
1078		if (sym->binding != STB_GLOBAL)
1079			continue;
1080		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1081			if (!strcmp(sym->name, syms[i]))
1082				return sym->start;
1083		}
1084	}
1085
1086	return 0;
1087}
1088
1089/*
1090 * These values can be used for kernels that do not have symbols for the entry
1091 * trampolines in kallsyms.
1092 */
1093#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1094#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1095#define X86_64_ENTRY_TRAMPOLINE		0x6000
1096
1097/* Map x86_64 PTI entry trampolines */
1098int machine__map_x86_64_entry_trampolines(struct machine *machine,
1099					  struct dso *kernel)
1100{
1101	struct maps *kmaps = &machine->kmaps;
1102	int nr_cpus_avail, cpu;
1103	bool found = false;
1104	struct map *map;
1105	u64 pgoff;
1106
1107	/*
1108	 * In the vmlinux case, pgoff is a virtual address which must now be
1109	 * mapped to a vmlinux offset.
1110	 */
1111	maps__for_each_entry(kmaps, map) {
1112		struct kmap *kmap = __map__kmap(map);
1113		struct map *dest_map;
1114
1115		if (!kmap || !is_entry_trampoline(kmap->name))
1116			continue;
1117
1118		dest_map = maps__find(kmaps, map->pgoff);
1119		if (dest_map != map)
1120			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1121		found = true;
1122	}
1123	if (found || machine->trampolines_mapped)
1124		return 0;
1125
1126	pgoff = find_entry_trampoline(kernel);
1127	if (!pgoff)
1128		return 0;
1129
1130	nr_cpus_avail = machine__nr_cpus_avail(machine);
1131
1132	/* Add a 1 page map for each CPU's entry trampoline */
1133	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1134		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1135			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1136			 X86_64_ENTRY_TRAMPOLINE;
1137		struct extra_kernel_map xm = {
1138			.start = va,
1139			.end   = va + page_size,
1140			.pgoff = pgoff,
1141		};
1142
1143		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1144
1145		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1146			return -1;
1147	}
1148
1149	machine->trampolines_mapped = nr_cpus_avail;
1150
1151	return 0;
1152}
1153
1154int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1155					     struct dso *kernel __maybe_unused)
1156{
1157	return 0;
1158}
1159
1160static int
1161__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1162{
1163	/* If the kernel map is being renewed, destroy the previous one first */
1164	machine__destroy_kernel_maps(machine);
1165
1166	machine->vmlinux_map = map__new2(0, kernel);
1167	if (machine->vmlinux_map == NULL)
1168		return -1;
1169
1170	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
1171	maps__insert(&machine->kmaps, machine->vmlinux_map);
1172	return 0;
1173}
1174
1175void machine__destroy_kernel_maps(struct machine *machine)
1176{
1177	struct kmap *kmap;
1178	struct map *map = machine__kernel_map(machine);
1179
1180	if (map == NULL)
1181		return;
1182
1183	kmap = map__kmap(map);
1184	maps__remove(&machine->kmaps, map);
1185	if (kmap && kmap->ref_reloc_sym) {
1186		zfree((char **)&kmap->ref_reloc_sym->name);
1187		zfree(&kmap->ref_reloc_sym);
1188	}
1189
1190	map__zput(machine->vmlinux_map);
1191}
1192
1193int machines__create_guest_kernel_maps(struct machines *machines)
1194{
1195	int ret = 0;
1196	struct dirent **namelist = NULL;
1197	int i, items = 0;
1198	char path[PATH_MAX];
1199	pid_t pid;
1200	char *endp;
1201
1202	if (symbol_conf.default_guest_vmlinux_name ||
1203	    symbol_conf.default_guest_modules ||
1204	    symbol_conf.default_guest_kallsyms) {
1205		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1206	}
1207
1208	if (symbol_conf.guestmount) {
1209		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1210		if (items <= 0)
1211			return -ENOENT;
1212		for (i = 0; i < items; i++) {
1213			if (!isdigit(namelist[i]->d_name[0])) {
1214				/* Filter out . and .. */
1215				continue;
1216			}
1217			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1218			if ((*endp != '\0') ||
1219			    (endp == namelist[i]->d_name) ||
1220			    (errno == ERANGE)) {
1221				pr_debug("invalid directory (%s). Skipping.\n",
1222					 namelist[i]->d_name);
1223				continue;
1224			}
1225			sprintf(path, "%s/%s/proc/kallsyms",
1226				symbol_conf.guestmount,
1227				namelist[i]->d_name);
1228			ret = access(path, R_OK);
1229			if (ret) {
1230				pr_debug("Can't access file %s\n", path);
1231				goto failure;
1232			}
1233			machines__create_kernel_maps(machines, pid);
1234		}
1235failure:
1236		free(namelist);
1237	}
1238
1239	return ret;
1240}
1241
1242void machines__destroy_kernel_maps(struct machines *machines)
1243{
1244	struct rb_node *next = rb_first_cached(&machines->guests);
1245
1246	machine__destroy_kernel_maps(&machines->host);
1247
1248	while (next) {
1249		struct machine *pos = rb_entry(next, struct machine, rb_node);
1250
1251		next = rb_next(&pos->rb_node);
1252		rb_erase_cached(&pos->rb_node, &machines->guests);
1253		machine__delete(pos);
1254	}
1255}
1256
1257int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1258{
1259	struct machine *machine = machines__findnew(machines, pid);
1260
1261	if (machine == NULL)
1262		return -1;
1263
1264	return machine__create_kernel_maps(machine);
1265}
1266
1267int machine__load_kallsyms(struct machine *machine, const char *filename)
1268{
1269	struct map *map = machine__kernel_map(machine);
1270	int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1271
1272	if (ret > 0) {
1273		dso__set_loaded(map->dso);
1274		/*
1275		 * Since /proc/kallsyms will have multiple sections for the
1276		 * kernel, with modules between them, fix up the end of all
1277		 * sections.
1278		 */
1279		maps__fixup_end(&machine->kmaps);
1280	}
1281
1282	return ret;
1283}
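/*
 * Note: a positive return from __dso__load_kallsyms() is the number of
 * symbols loaded, which is why "ret > 0" rather than "ret == 0" is the
 * success test above.
 */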
1284
1285int machine__load_vmlinux_path(struct machine *machine)
1286{
1287	struct map *map = machine__kernel_map(machine);
1288	int ret = dso__load_vmlinux_path(map->dso, map);
1289
1290	if (ret > 0)
1291		dso__set_loaded(map->dso);
1292
1293	return ret;
1294}
1295
1296static char *get_kernel_version(const char *root_dir)
1297{
1298	char version[PATH_MAX];
1299	FILE *file;
1300	char *name, *tmp;
1301	const char *prefix = "Linux version ";
1302
1303	sprintf(version, "%s/proc/version", root_dir);
1304	file = fopen(version, "r");
1305	if (!file)
1306		return NULL;
1307
1308	tmp = fgets(version, sizeof(version), file);
1309	fclose(file);
1310	if (!tmp)
1311		return NULL;
1312
1313	name = strstr(version, prefix);
1314	if (!name)
1315		return NULL;
1316	name += strlen(prefix);
1317	tmp = strchr(name, ' ');
1318	if (tmp)
1319		*tmp = '\0';
1320
1321	return strdup(name);
1322}
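/*
 * Example (illustrative): given a /proc/version line such as
 *
 *	Linux version 5.4.0-rc1 (user@host) (gcc ...) #1 SMP ...
 *
 * get_kernel_version() returns a strdup()ed "5.4.0-rc1", i.e. everything
 * between the "Linux version " prefix and the following space.
 */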
1323
1324static bool is_kmod_dso(struct dso *dso)
1325{
1326	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1327	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1328}
1329
1330static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1331{
1332	char *long_name;
1333	struct map *map = maps__find_by_name(maps, m->name);
1334
1335	if (map == NULL)
1336		return 0;
1337
1338	long_name = strdup(path);
1339	if (long_name == NULL)
1340		return -ENOMEM;
1341
1342	dso__set_long_name(map->dso, long_name, true);
1343	dso__kernel_module_get_build_id(map->dso, "");
1344
1345	/*
1346	 * The full name can reveal kmod compression, so
1347	 * update the symtab_type if needed.
1348	 */
1349	if (m->comp && is_kmod_dso(map->dso)) {
1350		map->dso->symtab_type++;
1351		map->dso->comp = m->comp;
1352	}
1353
1354	return 0;
1355}
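/*
 * Note: the symtab_type++ above relies on the dso_binary_type enum
 * ordering, where each _COMP variant immediately follows its uncompressed
 * counterpart (e.g. DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP right after
 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE), so the increment switches the dso
 * to its compressed-module type.
 */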
1356
1357static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1358{
1359	struct dirent *dent;
1360	DIR *dir = opendir(dir_name);
1361	int ret = 0;
1362
1363	if (!dir) {
1364		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1365		return -1;
1366	}
1367
1368	while ((dent = readdir(dir)) != NULL) {
1369		char path[PATH_MAX];
1370		struct stat st;
1371
1372		/* sshfs might return bad dent->d_type, so we have to stat */
1373		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1374		if (stat(path, &st))
1375			continue;
1376
1377		if (S_ISDIR(st.st_mode)) {
1378			if (!strcmp(dent->d_name, ".") ||
1379			    !strcmp(dent->d_name, ".."))
1380				continue;
1381
1382			/* Do not follow top-level source and build symlinks */
1383			if (depth == 0) {
1384				if (!strcmp(dent->d_name, "source") ||
1385				    !strcmp(dent->d_name, "build"))
1386					continue;
1387			}
1388
1389			ret = maps__set_modules_path_dir(maps, path, depth + 1);
1390			if (ret < 0)
1391				goto out;
1392		} else {
1393			struct kmod_path m;
1394
1395			ret = kmod_path__parse_name(&m, dent->d_name);
1396			if (ret)
1397				goto out;
1398
1399			if (m.kmod)
1400				ret = maps__set_module_path(maps, path, &m);
1401
1402			zfree(&m.name);
1403
1404			if (ret)
1405				goto out;
1406		}
1407	}
1408
1409out:
1410	closedir(dir);
1411	return ret;
1412}
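/*
 * Example (illustrative): while walking /lib/modules/<version>/, a file
 * like
 *
 *	kernel/fs/ext4/ext4.ko.xz
 *
 * is parsed by kmod_path__parse_name() into m.name = "[ext4]", with m.kmod
 * set and m.comp recording the compression, and is then matched against
 * the module maps built from /proc/modules so the dso's long name can
 * point at the on-disk path.
 */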
1413
1414static int machine__set_modules_path(struct machine *machine)
1415{
1416	char *version;
1417	char modules_path[PATH_MAX];
1418
1419	version = get_kernel_version(machine->root_dir);
1420	if (!version)
1421		return -1;
1422
1423	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1424		 machine->root_dir, version);
1425	free(version);
1426
1427	return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1428	}

1429	int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1430				u64 *size __maybe_unused,
1431				const char *name __maybe_unused)
1432{
1433	return 0;
1434}
1435
1436static int machine__create_module(void *arg, const char *name, u64 start,
1437				  u64 size)
1438{
1439	struct machine *machine = arg;
1440	struct map *map;
1441
1442	if (arch__fix_module_text_start(&start, &size, name) < 0)
1443		return -1;
1444
1445	map = machine__addnew_module_map(machine, start, name);
1446	if (map == NULL)
1447		return -1;
1448	map->end = start + size;
1449
1450	dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1451
1452	return 0;
1453}
1454
1455static int machine__create_modules(struct machine *machine)
1456{
1457	const char *modules;
1458	char path[PATH_MAX];
1459
1460	if (machine__is_default_guest(machine)) {
1461		modules = symbol_conf.default_guest_modules;
1462	} else {
1463		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1464		modules = path;
1465	}
1466
1467	if (symbol__restricted_filename(modules, "/proc/modules"))
1468		return -1;
1469
1470	if (modules__parse(modules, machine, machine__create_module))
1471		return -1;
1472
1473	if (!machine__set_modules_path(machine))
1474		return 0;
1475
1476	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1477
1478	return 0;
1479}
1480
1481static void machine__set_kernel_mmap(struct machine *machine,
1482				     u64 start, u64 end)
1483{
1484	machine->vmlinux_map->start = start;
1485	machine->vmlinux_map->end   = end;
1486	/*
1487	 * Be a bit paranoid here, some perf.data files came with
1488	 * a zero-sized synthesized MMAP event for the kernel.
1489	 */
1490	if (start == 0 && end == 0)
1491		machine->vmlinux_map->end = ~0ULL;
1492}
1493
1494static void machine__update_kernel_mmap(struct machine *machine,
1495				     u64 start, u64 end)
1496{
1497	struct map *map = machine__kernel_map(machine);
1498
1499	map__get(map);
1500	maps__remove(&machine->kmaps, map);
1501
1502	machine__set_kernel_mmap(machine, start, end);
1503
1504	maps__insert(&machine->kmaps, map);
1505	map__put(map);
1506}
1507
1508int machine__create_kernel_maps(struct machine *machine)
1509{
1510	struct dso *kernel = machine__get_kernel(machine);
1511	const char *name = NULL;
1512	struct map *map;
1513	u64 start = 0, end = ~0ULL;
1514	int ret;
1515
1516	if (kernel == NULL)
1517		return -1;
1518
1519	ret = __machine__create_kernel_maps(machine, kernel);
1520	if (ret < 0)
1521		goto out_put;
1522
1523	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1524		if (machine__is_host(machine))
1525			pr_debug("Problems creating module maps, "
1526				 "continuing anyway...\n");
1527		else
1528			pr_debug("Problems creating module maps for guest %d, "
1529				 "continuing anyway...\n", machine->pid);
1530	}
1531
1532	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1533		if (name &&
1534		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1535			machine__destroy_kernel_maps(machine);
1536			ret = -1;
1537			goto out_put;
1538		}
1539
1540		/*
1541		 * We have a real start address now, so re-order the kmaps;
1542		 * assume it's the last in the kmaps.
1543		 */
1544		machine__update_kernel_mmap(machine, start, end);
1545	}
1546
1547	if (machine__create_extra_kernel_maps(machine, kernel))
1548		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1549
1550	if (end == ~0ULL) {
1551		/* update end address of the kernel map using adjacent module address */
1552		map = map__next(machine__kernel_map(machine));
1553		if (map)
1554			machine__set_kernel_mmap(machine, start, map->start);
1555	}
1556
1557out_put:
1558	dso__put(kernel);
1559	return ret;
1560}
1561
1562static bool machine__uses_kcore(struct machine *machine)
1563{
1564	struct dso *dso;
1565
1566	list_for_each_entry(dso, &machine->dsos.head, node) {
1567		if (dso__is_kcore(dso))
1568			return true;
1569	}
1570
1571	return false;
1572}
1573
1574static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1575					     union perf_event *event)
1576{
1577	return machine__is(machine, "x86_64") &&
1578	       is_entry_trampoline(event->mmap.filename);
1579}
1580
1581static int machine__process_extra_kernel_map(struct machine *machine,
1582					     union perf_event *event)
1583{
1584	struct dso *kernel = machine__kernel_dso(machine);
1585	struct extra_kernel_map xm = {
1586		.start = event->mmap.start,
1587		.end   = event->mmap.start + event->mmap.len,
1588		.pgoff = event->mmap.pgoff,
1589	};
1590
1591	if (kernel == NULL)
1592		return -1;
1593
1594	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1595
1596	return machine__create_extra_kernel_map(machine, kernel, &xm);
1597}
1598
1599static int machine__process_kernel_mmap_event(struct machine *machine,
1600					      union perf_event *event)
1601{
1602	struct map *map;
1603	enum dso_space_type dso_space;
1604	bool is_kernel_mmap;
1605
1606	/* If we have maps from kcore then we do not need or want any others */
1607	if (machine__uses_kcore(machine))
1608		return 0;
1609
1610	if (machine__is_host(machine))
1611		dso_space = DSO_SPACE__KERNEL;
1612	else
1613		dso_space = DSO_SPACE__KERNEL_GUEST;
1614
1615	is_kernel_mmap = memcmp(event->mmap.filename,
1616				machine->mmap_name,
1617				strlen(machine->mmap_name) - 1) == 0;
1618	if (event->mmap.filename[0] == '/' ||
1619	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1620		map = machine__addnew_module_map(machine, event->mmap.start,
1621						 event->mmap.filename);
1622		if (map == NULL)
1623			goto out_problem;
1624
1625		map->end = map->start + event->mmap.len;
1626	} else if (is_kernel_mmap) {
1627		const char *symbol_name = (event->mmap.filename +
1628				strlen(machine->mmap_name));
1629		/*
1630		 * Should be there already, from the build-id table in
1631		 * the header.
1632		 */
1633		struct dso *kernel = NULL;
1634		struct dso *dso;
1635
1636		down_read(&machine->dsos.lock);
1637
1638		list_for_each_entry(dso, &machine->dsos.head, node) {
1639
1640			/*
1641			 * The cpumode passed to is_kernel_module is not the
1642			 * cpumode of *this* event. If we insisted on passing the
1643			 * correct cpumode to is_kernel_module(), we would have to
1644			 * record the cpumode when adding this dso to the
1645			 * linked list.
1646			 *
1647			 * However, we don't really need to pass the correct
1648			 * cpumode. We know the correct cpumode must be kernel
1649			 * mode (if not, we should not link it onto kernel_dsos
1650			 * list).
1651			 *
1652			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1653			 * is_kernel_module() treats it as a kernel cpumode.
1654			 */
1655
1656			if (!dso->kernel ||
1657			    is_kernel_module(dso->long_name,
1658					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1659				continue;
1660
1661
1662			kernel = dso;
1663			break;
1664		}
1665
1666		up_read(&machine->dsos.lock);
1667
1668		if (kernel == NULL)
1669			kernel = machine__findnew_dso(machine, machine->mmap_name);
1670		if (kernel == NULL)
1671			goto out_problem;
1672
1673		kernel->kernel = dso_space;
1674		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1675			dso__put(kernel);
1676			goto out_problem;
1677		}
1678
1679		if (strstr(kernel->long_name, "vmlinux"))
1680			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1681
1682		machine__update_kernel_mmap(machine, event->mmap.start,
1683					 event->mmap.start + event->mmap.len);
1684
1685		/*
1686		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1687		 * symbol. Effectively having zero here means that at record
1688		 * time /proc/sys/kernel/kptr_restrict was non zero.
1689		 */
1690		if (event->mmap.pgoff != 0) {
1691			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1692							symbol_name,
1693							event->mmap.pgoff);
1694		}
1695
1696		if (machine__is_default_guest(machine)) {
1697			/*
1698			 * preload dso of guest kernel and modules
1699			 */
1700			dso__load(kernel, machine__kernel_map(machine));
1701		}
1702	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
1703		return machine__process_extra_kernel_map(machine, event);
1704	}
1705	return 0;
1706out_problem:
1707	return -1;
1708}
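/*
 * Summary of the classification above (illustrative filenames):
 *
 *	"/lib/modules/.../ext4.ko"	-> module map (starts with '/')
 *	"[ext4]"			-> module map ('[' but not mmap_name)
 *	"[kernel.kallsyms]_text"	-> kernel map, symbol_name = "_text"
 *	x86_64 entry trampoline		-> extra kernel map
 */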
1709
1710int machine__process_mmap2_event(struct machine *machine,
1711				 union perf_event *event,
1712				 struct perf_sample *sample)
1713{
1714	struct thread *thread;
1715	struct map *map;
1716	struct dso_id dso_id = {
1717		.maj = event->mmap2.maj,
1718		.min = event->mmap2.min,
1719		.ino = event->mmap2.ino,
1720		.ino_generation = event->mmap2.ino_generation,
1721	};
1722	int ret = 0;
1723
1724	if (dump_trace)
1725		perf_event__fprintf_mmap2(event, stdout);
1726
1727	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1728	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1729		ret = machine__process_kernel_mmap_event(machine, event);
1730		if (ret < 0)
1731			goto out_problem;
1732		return 0;
1733	}
1734
1735	thread = machine__findnew_thread(machine, event->mmap2.pid,
1736					event->mmap2.tid);
1737	if (thread == NULL)
1738		goto out_problem;
1739
1740	map = map__new(machine, event->mmap2.start,
1741			event->mmap2.len, event->mmap2.pgoff,
1742			&dso_id, event->mmap2.prot,
1743			event->mmap2.flags,
1744			event->mmap2.filename, thread);
1745
1746	if (map == NULL)
1747		goto out_problem_map;
1748
1749	ret = thread__insert_map(thread, map);
1750	if (ret)
1751		goto out_problem_insert;
1752
1753	thread__put(thread);
1754	map__put(map);
1755	return 0;
1756
1757out_problem_insert:
1758	map__put(map);
1759out_problem_map:
1760	thread__put(thread);
1761out_problem:
1762	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1763	return 0;
1764}
1765
1766int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1767				struct perf_sample *sample)
1768{
1769	struct thread *thread;
1770	struct map *map;
1771	u32 prot = 0;
1772	int ret = 0;
1773
1774	if (dump_trace)
1775		perf_event__fprintf_mmap(event, stdout);
1776
1777	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1778	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1779		ret = machine__process_kernel_mmap_event(machine, event);
1780		if (ret < 0)
1781			goto out_problem;
1782		return 0;
1783	}
1784
1785	thread = machine__findnew_thread(machine, event->mmap.pid,
1786					 event->mmap.tid);
1787	if (thread == NULL)
1788		goto out_problem;
1789
1790	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1791		prot = PROT_EXEC;
1792
1793	map = map__new(machine, event->mmap.start,
1794			event->mmap.len, event->mmap.pgoff,
1795			NULL, prot, 0, event->mmap.filename, thread);
1796
1797	if (map == NULL)
1798		goto out_problem_map;
1799
1800	ret = thread__insert_map(thread, map);
1801	if (ret)
1802		goto out_problem_insert;
1803
1804	thread__put(thread);
1805	map__put(map);
1806	return 0;
1807
1808out_problem_insert:
1809	map__put(map);
1810out_problem_map:
1811	thread__put(thread);
1812out_problem:
1813	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1814	return 0;
1815}
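/*
 * Note: legacy PERF_RECORD_MMAP events carry no protection bits (unlike
 * MMAP2), so they are inferred above: unless the kernel marked the mapping
 * as data (PERF_RECORD_MISC_MMAP_DATA), it is assumed executable and gets
 * PROT_EXEC.
 */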
1816
1817static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1818{
1819	struct threads *threads = machine__threads(machine, th->tid);
1820
1821	if (threads->last_match == th)
1822		threads__set_last_match(threads, NULL);
1823
1824	if (lock)
1825		down_write(&threads->lock);
1826
1827	BUG_ON(refcount_read(&th->refcnt) == 0);
1828
1829	rb_erase_cached(&th->rb_node, &threads->entries);
1830	RB_CLEAR_NODE(&th->rb_node);
1831	--threads->nr;
1832	/*
1833	 * Move it first to the dead_threads list, then drop the reference,
1834	 * if this is the last reference, then the thread__delete destructor
1835	 * will be called and we will remove it from the dead_threads list.
1836	 */
1837	list_add_tail(&th->node, &threads->dead);
1838
1839	/*
1840	 * We need to do the put here because if this is the last refcount,
1841	 * then we will be touching the threads->dead head when removing the
1842	 * thread.
1843	 */
1844	thread__put(th);
1845
1846	if (lock)
1847		up_write(&threads->lock);
1848}
1849
1850void machine__remove_thread(struct machine *machine, struct thread *th)
1851{
1852	return __machine__remove_thread(machine, th, true);
1853}
1854
1855int machine__process_fork_event(struct machine *machine, union perf_event *event,
1856				struct perf_sample *sample)
1857{
1858	struct thread *thread = machine__find_thread(machine,
1859						     event->fork.pid,
1860						     event->fork.tid);
1861	struct thread *parent = machine__findnew_thread(machine,
1862							event->fork.ppid,
1863							event->fork.ptid);
1864	bool do_maps_clone = true;
1865	int err = 0;
1866
1867	if (dump_trace)
1868		perf_event__fprintf_task(event, stdout);
1869
1870	/*
1871	 * There may be an existing thread that is not actually the parent,
1872	 * either because we are processing events out of order, or because the
1873	 * (fork) event that would have removed the thread was lost. Assume the
1874	 * latter case and continue on as best we can.
1875	 */
1876	if (parent->pid_ != (pid_t)event->fork.ppid) {
1877		dump_printf("removing erroneous parent thread %d/%d\n",
1878			    parent->pid_, parent->tid);
1879		machine__remove_thread(machine, parent);
1880		thread__put(parent);
1881		parent = machine__findnew_thread(machine, event->fork.ppid,
1882						 event->fork.ptid);
1883	}
1884
1885	/* if a thread currently exists for the thread id remove it */
1886	if (thread != NULL) {
1887		machine__remove_thread(machine, thread);
1888		thread__put(thread);
1889	}
1890
1891	thread = machine__findnew_thread(machine, event->fork.pid,
1892					 event->fork.tid);
1893	/*
1894	 * When synthesizing FORK events, we are trying to create thread
1895	 * objects for the already running tasks on the machine.
1896	 *
1897	 * Normally, for a kernel FORK event, we want to clone the parent's
1898	 * maps because that is what the kernel just did.
1899	 *
1900	 * But when synthesizing, this should not be done.  If we do, we end up
1901	 * with overlapping maps as we process the synthesized MMAP2 events that
1902	 * get delivered shortly thereafter.
1903	 *
1904	 * Use the FORK event misc flags in an internal way to signal this
1905	 * situation, so we can elide the map clone when appropriate.
1906	 */
1907	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1908		do_maps_clone = false;
1909
1910	if (thread == NULL || parent == NULL ||
1911	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1912		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1913		err = -1;
1914	}
1915	thread__put(thread);
1916	thread__put(parent);
1917
1918	return err;
1919}
1920
1921int machine__process_exit_event(struct machine *machine, union perf_event *event,
1922				struct perf_sample *sample __maybe_unused)
1923{
1924	struct thread *thread = machine__find_thread(machine,
1925						     event->fork.pid,
1926						     event->fork.tid);
1927
1928	if (dump_trace)
1929		perf_event__fprintf_task(event, stdout);
1930
1931	if (thread != NULL) {
1932		thread__exited(thread);
1933		thread__put(thread);
1934	}
1935
1936	return 0;
1937}
1938
1939int machine__process_event(struct machine *machine, union perf_event *event,
1940			   struct perf_sample *sample)
1941{
1942	int ret;
1943
1944	switch (event->header.type) {
1945	case PERF_RECORD_COMM:
1946		ret = machine__process_comm_event(machine, event, sample); break;
1947	case PERF_RECORD_MMAP:
1948		ret = machine__process_mmap_event(machine, event, sample); break;
1949	case PERF_RECORD_NAMESPACES:
1950		ret = machine__process_namespaces_event(machine, event, sample); break;
1951	case PERF_RECORD_CGROUP:
1952		ret = machine__process_cgroup_event(machine, event, sample); break;
1953	case PERF_RECORD_MMAP2:
1954		ret = machine__process_mmap2_event(machine, event, sample); break;
1955	case PERF_RECORD_FORK:
1956		ret = machine__process_fork_event(machine, event, sample); break;
1957	case PERF_RECORD_EXIT:
1958		ret = machine__process_exit_event(machine, event, sample); break;
1959	case PERF_RECORD_LOST:
1960		ret = machine__process_lost_event(machine, event, sample); break;
1961	case PERF_RECORD_AUX:
1962		ret = machine__process_aux_event(machine, event); break;
1963	case PERF_RECORD_ITRACE_START:
1964		ret = machine__process_itrace_start_event(machine, event); break;
1965	case PERF_RECORD_LOST_SAMPLES:
1966		ret = machine__process_lost_samples_event(machine, event, sample); break;
1967	case PERF_RECORD_SWITCH:
1968	case PERF_RECORD_SWITCH_CPU_WIDE:
1969		ret = machine__process_switch_event(machine, event); break;
1970	case PERF_RECORD_KSYMBOL:
1971		ret = machine__process_ksymbol(machine, event, sample); break;
1972	case PERF_RECORD_BPF_EVENT:
1973		ret = machine__process_bpf(machine, event, sample); break;
1974	case PERF_RECORD_TEXT_POKE:
1975		ret = machine__process_text_poke(machine, event, sample); break;
1976	default:
1977		ret = -1;
1978		break;
1979	}
1980
1981	return ret;
1982}
1983
1984static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1985{
1986	if (!regexec(regex, sym->name, 0, NULL, 0))
1987		return 1;
1988	return 0;
1989}
1990
1991static void ip__resolve_ams(struct thread *thread,
1992			    struct addr_map_symbol *ams,
1993			    u64 ip)
1994{
1995	struct addr_location al;
1996
1997	memset(&al, 0, sizeof(al));
1998	/*
1999	 * We cannot use the header.misc hint to determine whether a
2000	 * branch stack address is user, kernel, guest, hypervisor.
2001	 * Branches may straddle the kernel/user/hypervisor boundaries.
2002	 * Thus, we have to try consecutively until we find a match,
2003	 * or else the symbol is unknown.
2004	 */
2005	thread__find_cpumode_addr_location(thread, ip, &al);
2006
2007	ams->addr = ip;
2008	ams->al_addr = al.addr;
2009	ams->ms.maps = al.maps;
2010	ams->ms.sym = al.sym;
2011	ams->ms.map = al.map;
2012	ams->phys_addr = 0;
2013}
2014
2015static void ip__resolve_data(struct thread *thread,
2016			     u8 m, struct addr_map_symbol *ams,
2017			     u64 addr, u64 phys_addr)
2018{
2019	struct addr_location al;
2020
2021	memset(&al, 0, sizeof(al));
2022
2023	thread__find_symbol(thread, m, addr, &al);
2024
2025	ams->addr = addr;
2026	ams->al_addr = al.addr;
2027	ams->ms.maps = al.maps;
2028	ams->ms.sym = al.sym;
2029	ams->ms.map = al.map;
2030	ams->phys_addr = phys_addr;
2031}
2032
2033struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2034				     struct addr_location *al)
2035{
2036	struct mem_info *mi = mem_info__new();
2037
2038	if (!mi)
2039		return NULL;
2040
2041	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2042	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2043			 sample->addr, sample->phys_addr);
2044	mi->data_src.val = sample->data_src;
2045
2046	return mi;
2047}
2048
2049static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2050{
2051	struct map *map = ms->map;
2052	char *srcline = NULL;
2053
2054	if (!map || callchain_param.key == CCKEY_FUNCTION)
2055		return srcline;
2056
2057	srcline = srcline__tree_find(&map->dso->srclines, ip);
2058	if (!srcline) {
2059		bool show_sym = false;
2060		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2061
2062		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2063				      ms->sym, show_sym, show_addr, ip);
2064		srcline__tree_insert(&map->dso->srclines, ip, srcline);
2065	}
2066
2067	return srcline;
2068}
2069
2070struct iterations {
2071	int nr_loop_iter;
2072	u64 cycles;
2073};
2074
2075static int add_callchain_ip(struct thread *thread,
2076			    struct callchain_cursor *cursor,
2077			    struct symbol **parent,
2078			    struct addr_location *root_al,
2079			    u8 *cpumode,
2080			    u64 ip,
2081			    bool branch,
2082			    struct branch_flags *flags,
2083			    struct iterations *iter,
2084			    u64 branch_from)
2085{
2086	struct map_symbol ms;
2087	struct addr_location al;
2088	int nr_loop_iter = 0;
2089	u64 iter_cycles = 0;
2090	const char *srcline = NULL;
2091
2092	al.filtered = 0;
2093	al.sym = NULL;
2094	if (!cpumode) {
2095		thread__find_cpumode_addr_location(thread, ip, &al);
2096	} else {
2097		if (ip >= PERF_CONTEXT_MAX) {
2098			switch (ip) {
2099			case PERF_CONTEXT_HV:
2100				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2101				break;
2102			case PERF_CONTEXT_KERNEL:
2103				*cpumode = PERF_RECORD_MISC_KERNEL;
2104				break;
2105			case PERF_CONTEXT_USER:
2106				*cpumode = PERF_RECORD_MISC_USER;
2107				break;
2108			default:
2109				pr_debug("invalid callchain context: "
2110					 "%"PRId64"\n", (s64) ip);
2111				/*
2112				 * It seems the callchain is corrupted.
2113				 * Discard all.
2114				 */
2115				callchain_cursor_reset(cursor);
2116				return 1;
2117			}
2118			return 0;
2119		}
2120		thread__find_symbol(thread, *cpumode, ip, &al);
2121	}
2122
2123	if (al.sym != NULL) {
2124		if (perf_hpp_list.parent && !*parent &&
2125		    symbol__match_regex(al.sym, &parent_regex))
2126			*parent = al.sym;
2127		else if (have_ignore_callees && root_al &&
2128		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2129			/* Treat this symbol as the root,
2130			   forgetting its callees. */
2131			*root_al = al;
2132			callchain_cursor_reset(cursor);
2133		}
2134	}
2135
2136	if (symbol_conf.hide_unresolved && al.sym == NULL)
2137		return 0;
2138
2139	if (iter) {
2140		nr_loop_iter = iter->nr_loop_iter;
2141		iter_cycles = iter->cycles;
2142	}
2143
2144	ms.maps = al.maps;
2145	ms.map = al.map;
2146	ms.sym = al.sym;
2147	srcline = callchain_srcline(&ms, al.addr);
2148	return callchain_cursor_append(cursor, ip, &ms,
2149				       branch, flags, nr_loop_iter,
2150				       iter_cycles, branch_from, srcline);
2151}
2152
2153struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2154					   struct addr_location *al)
2155{
2156	unsigned int i;
2157	const struct branch_stack *bs = sample->branch_stack;
2158	struct branch_entry *entries = perf_sample__branch_entries(sample);
2159	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2160
2161	if (!bi)
2162		return NULL;
2163
2164	for (i = 0; i < bs->nr; i++) {
2165		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2166		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2167		bi[i].flags = entries[i].flags;
2168	}
2169	return bi;
2170}
2171
2172static void save_iterations(struct iterations *iter,
2173			    struct branch_entry *be, int nr)
2174{
2175	int i;
2176
2177	iter->nr_loop_iter++;
2178	iter->cycles = 0;
2179
2180	for (i = 0; i < nr; i++)
2181		iter->cycles += be[i].flags.cycles;
2182}
2183
2184#define CHASHSZ 127
2185#define CHASHBITS 7
2186#define NO_ENTRY 0xff
2187
2188#define PERF_MAX_BRANCH_DEPTH 127
2189
2190/* Remove loops. */
2191static int remove_loops(struct branch_entry *l, int nr,
2192			struct iterations *iter)
2193{
2194	int i, j, off;
2195	unsigned char chash[CHASHSZ];
2196
2197	memset(chash, NO_ENTRY, sizeof(chash));
2198
2199	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2200
2201	for (i = 0; i < nr; i++) {
2202		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2203
2204		/* no collision handling for now */
2205		if (chash[h] == NO_ENTRY) {
2206			chash[h] = i;
2207		} else if (l[chash[h]].from == l[i].from) {
2208			bool is_loop = true;
2209			/* check if it is a real loop */
2210			off = 0;
2211			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2212				if (l[j].from != l[i + off].from) {
2213					is_loop = false;
2214					break;
2215				}
2216			if (is_loop) {
2217				j = nr - (i + off);
2218				if (j > 0) {
2219					save_iterations(iter + i + off,
2220						l + i, off);
2221
2222					memmove(iter + i, iter + i + off,
2223						j * sizeof(*iter));
2224
2225					memmove(l + i, l + i + off,
2226						j * sizeof(*l));
2227				}
2228
2229				nr -= off;
2230			}
2231		}
2232	}
2233	return nr;
2234}
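/*
 * Worked example (illustrative): for a branch stack whose "from" addresses
 * are A B A B C, the second A hits the same chash slot as the first, the
 * repeated A B prefix is recognized as a loop iteration and one copy is
 * removed, leaving A B C (nr = 3); save_iterations() accumulates the
 * removed iteration count and cycles into the iterations entry that ends
 * up attached to the first branch after the loop (C here).
 */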
2235
2236static int lbr_callchain_add_kernel_ip(struct thread *thread,
2237				       struct callchain_cursor *cursor,
2238				       struct perf_sample *sample,
2239				       struct symbol **parent,
2240				       struct addr_location *root_al,
2241				       u64 branch_from,
2242				       bool callee, int end)
2243{
2244	struct ip_callchain *chain = sample->callchain;
2245	u8 cpumode = PERF_RECORD_MISC_USER;
2246	int err, i;
2247
2248	if (callee) {
2249		for (i = 0; i < end + 1; i++) {
2250			err = add_callchain_ip(thread, cursor, parent,
2251					       root_al, &cpumode, chain->ips[i],
2252					       false, NULL, NULL, branch_from);
2253			if (err)
2254				return err;
2255		}
2256		return 0;
2257	}
2258
2259	for (i = end; i >= 0; i--) {
2260		err = add_callchain_ip(thread, cursor, parent,
2261				       root_al, &cpumode, chain->ips[i],
2262				       false, NULL, NULL, branch_from);
2263		if (err)
2264			return err;
2265	}
2266
2267	return 0;
2268}
2269
2270static void save_lbr_cursor_node(struct thread *thread,
2271				 struct callchain_cursor *cursor,
2272				 int idx)
2273{
2274	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2275
2276	if (!lbr_stitch)
2277		return;
2278
2279	if (cursor->pos == cursor->nr) {
2280		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2281		return;
2282	}
2283
2284	if (!cursor->curr)
2285		cursor->curr = cursor->first;
2286	else
2287		cursor->curr = cursor->curr->next;
2288	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2289	       sizeof(struct callchain_cursor_node));
2290
2291	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2292	cursor->pos++;
2293}
2294
2295static int lbr_callchain_add_lbr_ip(struct thread *thread,
2296				    struct callchain_cursor *cursor,
2297				    struct perf_sample *sample,
2298				    struct symbol **parent,
2299				    struct addr_location *root_al,
2300				    u64 *branch_from,
2301				    bool callee)
2302{
2303	struct branch_stack *lbr_stack = sample->branch_stack;
2304	struct branch_entry *entries = perf_sample__branch_entries(sample);
2305	u8 cpumode = PERF_RECORD_MISC_USER;
2306	int lbr_nr = lbr_stack->nr;
2307	struct branch_flags *flags;
2308	int err, i;
2309	u64 ip;
2310
2311	/*
2312	 * curr and pos are not used during the writing session. They are
2313	 * cleared in callchain_cursor_commit() when the writing session is
2314	 * closed, so they can be used here to track the current cursor node.
2315	 */
2316	if (thread->lbr_stitch) {
2317		cursor->curr = NULL;
2318		cursor->pos = cursor->nr;
2319		if (cursor->nr) {
2320			cursor->curr = cursor->first;
2321			for (i = 0; i < (int)(cursor->nr - 1); i++)
2322				cursor->curr = cursor->curr->next;
2323		}
2324	}
2325
2326	if (callee) {
2327		/* Add LBR ip from first entries.to */
2328		ip = entries[0].to;
2329		flags = &entries[0].flags;
2330		*branch_from = entries[0].from;
2331		err = add_callchain_ip(thread, cursor, parent,
2332				       root_al, &cpumode, ip,
2333				       true, flags, NULL,
2334				       *branch_from);
2335		if (err)
2336			return err;
2337
2338		/*
2339		 * The number of cursor nodes increases.
2340		 * Move the current cursor node, but there is no need to save
2341		 * the cursor node for entry 0: it's impossible to stitch the
2342		 * whole LBR stack of the previous sample.
2343		 */
2344		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2345			if (!cursor->curr)
2346				cursor->curr = cursor->first;
2347			else
2348				cursor->curr = cursor->curr->next;
2349			cursor->pos++;
2350		}
2351
2352		/* Add LBR ip from entries.from one by one. */
2353		for (i = 0; i < lbr_nr; i++) {
2354			ip = entries[i].from;
2355			flags = &entries[i].flags;
2356			err = add_callchain_ip(thread, cursor, parent,
2357					       root_al, &cpumode, ip,
2358					       true, flags, NULL,
2359					       *branch_from);
2360			if (err)
2361				return err;
2362			save_lbr_cursor_node(thread, cursor, i);
2363		}
2364		return 0;
2365	}
2366
2367	/* Add LBR ip from entries.from one by one. */
2368	for (i = lbr_nr - 1; i >= 0; i--) {
2369		ip = entries[i].from;
2370		flags = &entries[i].flags;
2371		err = add_callchain_ip(thread, cursor, parent,
2372				       root_al, &cpumode, ip,
2373				       true, flags, NULL,
2374				       *branch_from);
2375		if (err)
2376			return err;
2377		save_lbr_cursor_node(thread, cursor, i);
2378	}
2379
2380	/* Add LBR ip from first entries.to */
2381	ip = entries[0].to;
2382	flags = &entries[0].flags;
2383	*branch_from = entries[0].from;
2384	err = add_callchain_ip(thread, cursor, parent,
2385			       root_al, &cpumode, ip,
2386			       true, flags, NULL,
2387			       *branch_from);
2388	if (err)
2389		return err;
2390
2391	return 0;
2392}
2393
2394static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2395					     struct callchain_cursor *cursor)
2396{
2397	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2398	struct callchain_cursor_node *cnode;
2399	struct stitch_list *stitch_node;
2400	int err;
2401
2402	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2403		cnode = &stitch_node->cursor;
2404
2405		err = callchain_cursor_append(cursor, cnode->ip,
2406					      &cnode->ms,
2407					      cnode->branch,
2408					      &cnode->branch_flags,
2409					      cnode->nr_loop_iter,
2410					      cnode->iter_cycles,
2411					      cnode->branch_from,
2412					      cnode->srcline);
2413		if (err)
2414			return err;
2415	}
2416	return 0;
2417}
2418
2419static struct stitch_list *get_stitch_node(struct thread *thread)
2420{
2421	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2422	struct stitch_list *stitch_node;
2423
2424	if (!list_empty(&lbr_stitch->free_lists)) {
2425		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2426					       struct stitch_list, node);
2427		list_del(&stitch_node->node);
2428
2429		return stitch_node;
2430	}
2431
2432	return malloc(sizeof(struct stitch_list));
2433}
2434
2435static bool has_stitched_lbr(struct thread *thread,
2436			     struct perf_sample *cur,
2437			     struct perf_sample *prev,
2438			     unsigned int max_lbr,
2439			     bool callee)
2440{
2441	struct branch_stack *cur_stack = cur->branch_stack;
2442	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2443	struct branch_stack *prev_stack = prev->branch_stack;
2444	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2445	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2446	int i, j, nr_identical_branches = 0;
2447	struct stitch_list *stitch_node;
2448	u64 cur_base, distance;
2449
2450	if (!cur_stack || !prev_stack)
2451		return false;
2452
2453	/* Find the physical index of the base-of-stack for current sample. */
2454	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2455
2456	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2457						     (max_lbr + prev_stack->hw_idx - cur_base);
2458	/* The previous sample has a shorter stack. Nothing can be stitched. */
2459	if (distance + 1 > prev_stack->nr)
2460		return false;
2461
2462	/*
2463	 * Check if there are identical LBRs between two samples.
2464	 * Identical LBRs must have the same from, to and flags values. Also,
2465	 * they have to be saved in the same LBR registers (same physical
2466	 * index).
2467	 *
2468	 * Start from the base-of-stack of the current sample.
2469	 */
2470	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2471		if ((prev_entries[i].from != cur_entries[j].from) ||
2472		    (prev_entries[i].to != cur_entries[j].to) ||
2473		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2474			break;
2475		nr_identical_branches++;
2476	}
2477
2478	if (!nr_identical_branches)
2479		return false;
2480
2481	/*
2482	 * Save the LBRs between the base-of-stack of previous sample
2483	 * and the base-of-stack of current sample into lbr_stitch->lists.
2484	 * These LBRs will be stitched later.
2485	 */
2486	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2487
2488		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2489			continue;
2490
2491		stitch_node = get_stitch_node(thread);
2492		if (!stitch_node)
2493			return false;
2494
2495		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2496		       sizeof(struct callchain_cursor_node));
2497
2498		if (callee)
2499			list_add(&stitch_node->node, &lbr_stitch->lists);
2500		else
2501			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2502	}
2503
2504	return true;
2505}
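/*
 * Example (illustrative): with max_lbr = 32 LBR registers, a current
 * sample with nr = 8 and hw_idx = 5 has its base-of-stack in physical
 * register 32 - 8 + 5 + 1 = 30; if the previous sample's hw_idx was 3,
 * distance = 32 + 3 - 30 = 5. prev_entries[5] is then aligned with the
 * oldest current entry, the overlap is checked for identical branches,
 * and the previous sample's older entries (index > 5) are the candidates
 * saved for stitching.
 */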
2506
2507static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2508{
2509	if (thread->lbr_stitch)
2510		return true;
2511
2512	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2513	if (!thread->lbr_stitch)
2514		goto err;
2515
2516	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2517	if (!thread->lbr_stitch->prev_lbr_cursor)
2518		goto free_lbr_stitch;
2519
2520	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2521	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2522
2523	return true;
2524
2525free_lbr_stitch:
2526	zfree(&thread->lbr_stitch);
2527err:
2528	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2529	thread->lbr_stitch_enable = false;
2530	return false;
2531}
2532
2533/*
2534 * Resolve LBR callstack chain sample
2535 * Return:
2536 * 1 on success, LBR callchain information obtained
2537 * 0 if no LBR callchain information is available; the caller should try fp
2538 * negative error code on other errors.
2539 */
2540static int resolve_lbr_callchain_sample(struct thread *thread,
2541					struct callchain_cursor *cursor,
2542					struct perf_sample *sample,
2543					struct symbol **parent,
2544					struct addr_location *root_al,
2545					int max_stack,
2546					unsigned int max_lbr)
2547{
2548	bool callee = (callchain_param.order == ORDER_CALLEE);
2549	struct ip_callchain *chain = sample->callchain;
2550	int chain_nr = min(max_stack, (int)chain->nr), i;
2551	struct lbr_stitch *lbr_stitch;
2552	bool stitched_lbr = false;
2553	u64 branch_from = 0;
2554	int err;
2555
2556	for (i = 0; i < chain_nr; i++) {
2557		if (chain->ips[i] == PERF_CONTEXT_USER)
2558			break;
2559	}
2560
2561	/* LBR only affects the user callchain */
2562	if (i == chain_nr)
2563		return 0;
2564
2565	if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2566	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2567		lbr_stitch = thread->lbr_stitch;
2568
2569		stitched_lbr = has_stitched_lbr(thread, sample,
2570						&lbr_stitch->prev_sample,
2571						max_lbr, callee);
2572
2573		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2574			list_replace_init(&lbr_stitch->lists,
2575					  &lbr_stitch->free_lists);
2576		}
2577		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2578	}
2579
2580	if (callee) {
2581		/* Add kernel ip */
2582		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2583						  parent, root_al, branch_from,
2584						  true, i);
2585		if (err)
2586			goto error;
2587
2588		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2589					       root_al, &branch_from, true);
2590		if (err)
2591			goto error;
2592
2593		if (stitched_lbr) {
2594			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2595			if (err)
2596				goto error;
2597		}
2598
2599	} else {
2600		if (stitched_lbr) {
2601			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2602			if (err)
2603				goto error;
2604		}
2605		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2606					       root_al, &branch_from, false);
2607		if (err)
2608			goto error;
2609
2610		/* Add kernel ip */
2611		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2612						  parent, root_al, branch_from,
2613						  false, i);
2614		if (err)
2615			goto error;
2616	}
2617	return 1;
2618
2619error:
2620	return (err < 0) ? err : 0;
2621}
2622
2623static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2624			     struct callchain_cursor *cursor,
2625			     struct symbol **parent,
2626			     struct addr_location *root_al,
2627			     u8 *cpumode, int ent)
2628{
2629	int err = 0;
2630
2631	while (--ent >= 0) {
2632		u64 ip = chain->ips[ent];
2633
2634		if (ip >= PERF_CONTEXT_MAX) {
2635			err = add_callchain_ip(thread, cursor, parent,
2636					       root_al, cpumode, ip,
2637					       false, NULL, NULL, 0);
2638			break;
2639		}
2640	}
2641	return err;
2642}
2643
2644static int thread__resolve_callchain_sample(struct thread *thread,
2645					    struct callchain_cursor *cursor,
2646					    struct evsel *evsel,
2647					    struct perf_sample *sample,
2648					    struct symbol **parent,
2649					    struct addr_location *root_al,
2650					    int max_stack)
2651{
2652	struct branch_stack *branch = sample->branch_stack;
2653	struct branch_entry *entries = perf_sample__branch_entries(sample);
2654	struct ip_callchain *chain = sample->callchain;
2655	int chain_nr = 0;
2656	u8 cpumode = PERF_RECORD_MISC_USER;
2657	int i, j, err, nr_entries;
2658	int skip_idx = -1;
2659	int first_call = 0;
2660
2661	if (chain)
2662		chain_nr = chain->nr;
2663
2664	if (evsel__has_branch_callstack(evsel)) {
2665		struct perf_env *env = evsel__env(evsel);
2666
2667		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2668						   root_al, max_stack,
2669						   !env ? 0 : env->max_branches);
2670		if (err)
2671			return (err < 0) ? err : 0;
2672	}
2673
2674	/*
2675	 * Based on DWARF debug information, some architectures skip
2676	 * a callchain entry saved by the kernel.
2677	 */
2678	skip_idx = arch_skip_callchain_idx(thread, chain);
2679
2680	/*
2681	 * Add branches to call stack for easier browsing. This gives
2682	 * more context for a sample than just the callers.
2683	 *
2684	 * This uses individual histograms of paths compared to the
2685	 * aggregated histograms the normal LBR mode uses.
2686	 *
2687	 * Limitations for now:
2688	 * - No extra filters
2689	 * - No annotations (should annotate somehow)
2690	 */
2691
2692	if (branch && callchain_param.branch_callstack) {
2693		int nr = min(max_stack, (int)branch->nr);
2694		struct branch_entry be[nr];
2695		struct iterations iter[nr];
2696
2697		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2698			pr_warning("corrupted branch chain. skipping...\n");
2699			goto check_calls;
2700		}
2701
2702		for (i = 0; i < nr; i++) {
2703			if (callchain_param.order == ORDER_CALLEE) {
2704				be[i] = entries[i];
2705
2706				if (chain == NULL)
2707					continue;
2708
2709				/*
2710				 * Check for overlap into the callchain.
2711				 * The return address is one off compared to
2712				 * the branch entry. To adjust for this
2713				 * assume the calling instruction is not longer
2714				 * than 8 bytes.
2715				 */
2716				if (i == skip_idx ||
2717				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2718					first_call++;
2719				else if (be[i].from < chain->ips[first_call] &&
2720				    be[i].from >= chain->ips[first_call] - 8)
2721					first_call++;
2722			} else
2723				be[i] = entries[branch->nr - i - 1];
2724		}
2725
2726		memset(iter, 0, sizeof(struct iterations) * nr);
2727		nr = remove_loops(be, nr, iter);
2728
2729		for (i = 0; i < nr; i++) {
2730			err = add_callchain_ip(thread, cursor, parent,
2731					       root_al,
2732					       NULL, be[i].to,
2733					       true, &be[i].flags,
2734					       NULL, be[i].from);
2735
2736			if (!err)
2737				err = add_callchain_ip(thread, cursor, parent, root_al,
2738						       NULL, be[i].from,
2739						       true, &be[i].flags,
2740						       &iter[i], 0);
2741			if (err == -EINVAL)
2742				break;
2743			if (err)
2744				return err;
2745		}
2746
2747		if (chain_nr == 0)
2748			return 0;
2749
2750		chain_nr -= nr;
2751	}
2752
2753check_calls:
2754	if (chain && callchain_param.order != ORDER_CALLEE) {
2755		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2756					&cpumode, chain->nr - first_call);
2757		if (err)
2758			return (err < 0) ? err : 0;
2759	}
2760	for (i = first_call, nr_entries = 0;
2761	     i < chain_nr && nr_entries < max_stack; i++) {
2762		u64 ip;
2763
2764		if (callchain_param.order == ORDER_CALLEE)
2765			j = i;
2766		else
2767			j = chain->nr - i - 1;
2768
2769#ifdef HAVE_SKIP_CALLCHAIN_IDX
2770		if (j == skip_idx)
2771			continue;
2772#endif
2773		ip = chain->ips[j];
2774		if (ip < PERF_CONTEXT_MAX)
2775			++nr_entries;
2776		else if (callchain_param.order != ORDER_CALLEE) {
2777			err = find_prev_cpumode(chain, thread, cursor, parent,
2778						root_al, &cpumode, j);
2779			if (err)
2780				return (err < 0) ? err : 0;
2781			continue;
2782		}
2783
2784		err = add_callchain_ip(thread, cursor, parent,
2785				       root_al, &cpumode, ip,
2786				       false, NULL, NULL, 0);
2787
2788		if (err)
2789			return (err < 0) ? err : 0;
2790	}
2791
2792	return 0;
2793}
2794
2795static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2796{
2797	struct symbol *sym = ms->sym;
2798	struct map *map = ms->map;
2799	struct inline_node *inline_node;
2800	struct inline_list *ilist;
2801	u64 addr;
2802	int ret = 1;
2803
2804	if (!symbol_conf.inline_name || !map || !sym)
2805		return ret;
2806
2807	addr = map__map_ip(map, ip);
2808	addr = map__rip_2objdump(map, addr);
2809
2810	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2811	if (!inline_node) {
2812		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2813		if (!inline_node)
2814			return ret;
2815		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2816	}
2817
2818	list_for_each_entry(ilist, &inline_node->val, list) {
2819		struct map_symbol ilist_ms = {
2820			.maps = ms->maps,
2821			.map = map,
2822			.sym = ilist->symbol,
2823		};
2824		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2825					      NULL, 0, 0, 0, ilist->srcline);
2826
2827		if (ret != 0)
2828			return ret;
2829	}
2830
2831	return ret;
2832}
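/*
 * Note: append_inlines() returns 0 only when inline frames were found and
 * successfully appended (callchain_cursor_append() returns 0 on success);
 * unwind_entry() below uses that to avoid adding the non-inline frame a
 * second time.
 */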
2833
2834static int unwind_entry(struct unwind_entry *entry, void *arg)
2835{
2836	struct callchain_cursor *cursor = arg;
2837	const char *srcline = NULL;
2838	u64 addr = entry->ip;
2839
2840	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2841		return 0;
2842
2843	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2844		return 0;
2845
2846	/*
2847	 * Convert entry->ip from a virtual address to an offset in
2848	 * its corresponding binary.
2849	 */
2850	if (entry->ms.map)
2851		addr = map__map_ip(entry->ms.map, entry->ip);
2852
2853	srcline = callchain_srcline(&entry->ms, addr);
2854	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2855				       false, NULL, 0, 0, 0, srcline);
2856}
2857
2858static int thread__resolve_callchain_unwind(struct thread *thread,
2859					    struct callchain_cursor *cursor,
2860					    struct evsel *evsel,
2861					    struct perf_sample *sample,
2862					    int max_stack)
2863{
2864	/* Can we do DWARF post unwind? */
2865	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2866	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2867		return 0;
2868
2869	/* Bail out if nothing was captured. */
2870	if ((!sample->user_regs.regs) ||
2871	    (!sample->user_stack.size))
2872		return 0;
2873
2874	return unwind__get_entries(unwind_entry, cursor,
2875				   thread, sample, max_stack);
2876}
2877
2878int thread__resolve_callchain(struct thread *thread,
2879			      struct callchain_cursor *cursor,
2880			      struct evsel *evsel,
2881			      struct perf_sample *sample,
2882			      struct symbol **parent,
2883			      struct addr_location *root_al,
2884			      int max_stack)
2885{
2886	int ret = 0;
2887
2888	callchain_cursor_reset(cursor);
2889
2890	if (callchain_param.order == ORDER_CALLEE) {
2891		ret = thread__resolve_callchain_sample(thread, cursor,
2892						       evsel, sample,
2893						       parent, root_al,
2894						       max_stack);
2895		if (ret)
2896			return ret;
2897		ret = thread__resolve_callchain_unwind(thread, cursor,
2898						       evsel, sample,
2899						       max_stack);
2900	} else {
2901		ret = thread__resolve_callchain_unwind(thread, cursor,
2902						       evsel, sample,
2903						       max_stack);
2904		if (ret)
2905			return ret;
2906		ret = thread__resolve_callchain_sample(thread, cursor,
2907						       evsel, sample,
2908						       parent, root_al,
2909						       max_stack);
2910	}
2911
2912	return ret;
2913}
2914
2915int machine__for_each_thread(struct machine *machine,
2916			     int (*fn)(struct thread *thread, void *p),
2917			     void *priv)
2918{
2919	struct threads *threads;
2920	struct rb_node *nd;
2921	struct thread *thread;
2922	int rc = 0;
2923	int i;
2924
2925	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2926		threads = &machine->threads[i];
2927		for (nd = rb_first_cached(&threads->entries); nd;
2928		     nd = rb_next(nd)) {
2929			thread = rb_entry(nd, struct thread, rb_node);
2930			rc = fn(thread, priv);
2931			if (rc != 0)
2932				return rc;
2933		}
2934
2935		list_for_each_entry(thread, &threads->dead, node) {
2936			rc = fn(thread, priv);
2937			if (rc != 0)
2938				return rc;
2939		}
2940	}
2941	return rc;
2942}
2943
2944int machines__for_each_thread(struct machines *machines,
2945			      int (*fn)(struct thread *thread, void *p),
2946			      void *priv)
2947{
2948	struct rb_node *nd;
2949	int rc = 0;
2950
2951	rc = machine__for_each_thread(&machines->host, fn, priv);
2952	if (rc != 0)
2953		return rc;
2954
2955	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
2956		struct machine *machine = rb_entry(nd, struct machine, rb_node);
2957
2958		rc = machine__for_each_thread(machine, fn, priv);
2959		if (rc != 0)
2960			return rc;
2961	}
2962	return rc;
2963}
2964
2965pid_t machine__get_current_tid(struct machine *machine, int cpu)
2966{
2967	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2968
2969	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
2970		return -1;
2971
2972	return machine->current_tid[cpu];
2973}
2974
2975int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2976			     pid_t tid)
2977{
2978	struct thread *thread;
2979	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2980
2981	if (cpu < 0)
2982		return -EINVAL;
2983
2984	if (!machine->current_tid) {
2985		int i;
2986
2987		machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
2988		if (!machine->current_tid)
2989			return -ENOMEM;
2990		for (i = 0; i < nr_cpus; i++)
2991			machine->current_tid[i] = -1;
2992	}
2993
2994	if (cpu >= nr_cpus) {
2995		pr_err("Requested CPU %d too large. ", cpu);
2996		pr_err("Consider raising MAX_NR_CPUS\n");
2997		return -EINVAL;
2998	}
2999
3000	machine->current_tid[cpu] = tid;
3001
3002	thread = machine__findnew_thread(machine, pid, tid);
3003	if (!thread)
3004		return -ENOMEM;
3005
3006	thread->cpu = cpu;
3007	thread__put(thread);
3008
3009	return 0;
3010}
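/*
 * Usage sketch (illustrative): hardware trace decoders such as Intel PT
 * use this pair to track which task runs on each CPU while decoding:
 *
 *	machine__set_current_tid(machine, cpu, pid, tid);
 *	...
 *	pid_t tid = machine__get_current_tid(machine, cpu);
 */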
3011
3012/*
3013 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
3014 * normalized arch is needed.
3015 */
3016bool machine__is(struct machine *machine, const char *arch)
3017{
3018	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3019}
3020
3021int machine__nr_cpus_avail(struct machine *machine)
3022{
3023	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3024}
3025
3026int machine__get_kernel_start(struct machine *machine)
3027{
3028	struct map *map = machine__kernel_map(machine);
3029	int err = 0;
3030
3031	/*
3032	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3033	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3034	 * all addresses including kernel addresses are less than 2^32.  In
3035	 * that case (32-bit system), if the kernel mapping is unknown, all
3036	 * addresses will be assumed to be in user space - see
3037	 * machine__kernel_ip().
3038	 */
3039	machine->kernel_start = 1ULL << 63;
3040	if (map) {
3041		err = map__load(map);
3042		/*
3043		 * On x86_64, PTI entry trampolines are less than the
3044		 * start of kernel text, but still above 2^63. So leave
3045		 * kernel_start = 1ULL << 63 for x86_64.
3046		 */
3047		if (!err && !machine__is(machine, "x86_64"))
3048			machine->kernel_start = map->start;
3049	}
3050	return err;
3051}
3052
3053u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3054{
3055	u8 addr_cpumode = cpumode;
3056	bool kernel_ip;
3057
3058	if (!machine->single_address_space)
3059		goto out;
3060
3061	kernel_ip = machine__kernel_ip(machine, addr);
3062	switch (cpumode) {
3063	case PERF_RECORD_MISC_KERNEL:
3064	case PERF_RECORD_MISC_USER:
3065		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3066					   PERF_RECORD_MISC_USER;
3067		break;
3068	case PERF_RECORD_MISC_GUEST_KERNEL:
3069	case PERF_RECORD_MISC_GUEST_USER:
3070		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3071					   PERF_RECORD_MISC_GUEST_USER;
3072		break;
3073	default:
3074		break;
3075	}
3076out:
3077	return addr_cpumode;
3078}
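/*
 * Example (illustrative): on a machine with a single address space, a
 * sample with cpumode PERF_RECORD_MISC_USER but an address above
 * machine->kernel_start is reclassified as PERF_RECORD_MISC_KERNEL; only
 * the address itself can tell user and kernel apart there.
 */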
3079
3080struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3081{
3082	return dsos__findnew_id(&machine->dsos, filename, id);
3083}
3084
3085struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3086{
3087	return machine__findnew_dso_id(machine, filename, NULL);
3088}
3089
3090char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3091{
3092	struct machine *machine = vmachine;
3093	struct map *map;
3094	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3095
3096	if (sym == NULL)
3097		return NULL;
3098
3099	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3100	*addrp = map->unmap_ip(map, sym->start);
3101	return sym->name;
3102}