1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <inttypes.h>
5#include <regex.h>
6#include <stdlib.h>
7#include "callchain.h"
8#include "debug.h"
9#include "dso.h"
10#include "env.h"
11#include "event.h"
12#include "evsel.h"
13#include "hist.h"
14#include "machine.h"
15#include "map.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "mem-events.h"
19#include "path.h"
20#include "srcline.h"
21#include "symbol.h"
22#include "sort.h"
23#include "strlist.h"
24#include "target.h"
25#include "thread.h"
26#include "util.h"
27#include "vdso.h"
28#include <stdbool.h>
29#include <sys/types.h>
30#include <sys/stat.h>
31#include <unistd.h>
32#include "unwind.h"
33#include "linux/hash.h"
34#include "asm/bug.h"
35#include "bpf-event.h"
36#include <internal/lib.h> // page_size
37#include "cgroup.h"
38#include "arm64-frame-pointer-unwind-support.h"
39
40#include <linux/ctype.h>
41#include <symbol/kallsyms.h>
42#include <linux/mman.h>
43#include <linux/string.h>
44#include <linux/zalloc.h>
45
46static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
47 struct thread *th, bool lock);
48
49static struct dso *machine__kernel_dso(struct machine *machine)
50{
51 return map__dso(machine->vmlinux_map);
52}
53
54static void dsos__init(struct dsos *dsos)
55{
56 INIT_LIST_HEAD(&dsos->head);
57 dsos->root = RB_ROOT;
58 init_rwsem(&dsos->lock);
59}
60
61static void machine__threads_init(struct machine *machine)
62{
63 int i;
64
65 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
66 struct threads *threads = &machine->threads[i];
67 threads->entries = RB_ROOT_CACHED;
68 init_rwsem(&threads->lock);
69 threads->nr = 0;
70 threads->last_match = NULL;
71 }
72}
73
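/*
 * rb tree helpers: threads are kept in per-bucket rb trees ordered by tid,
 * so lookups compare the tid being searched for against the tid of the
 * thread held by each node.
 */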
74static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
75{
76 int to_find = (int) *((pid_t *)key);
77
78 return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
79}
80
81static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
82 struct rb_root *tree)
83{
84 pid_t to_find = thread__tid(th);
85 struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
86
87 return rb_entry(nd, struct thread_rb_node, rb_node);
88}
89
90static int machine__set_mmap_name(struct machine *machine)
91{
92 if (machine__is_host(machine))
93 machine->mmap_name = strdup("[kernel.kallsyms]");
94 else if (machine__is_default_guest(machine))
95 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
96 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
97 machine->pid) < 0)
98 machine->mmap_name = NULL;
99
100 return machine->mmap_name ? 0 : -ENOMEM;
101}
102
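/*
 * Guest threads get a synthetic "[guest/<pid>]" comm so they can be told
 * apart from host threads.
 */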
103static void thread__set_guest_comm(struct thread *thread, pid_t pid)
104{
105 char comm[64];
106
107 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
108 thread__set_comm(thread, comm, 0);
109}
110
111int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
112{
113 int err = -ENOMEM;
114
115 memset(machine, 0, sizeof(*machine));
116 machine->kmaps = maps__new(machine);
117 if (machine->kmaps == NULL)
118 return -ENOMEM;
119
120 RB_CLEAR_NODE(&machine->rb_node);
121 dsos__init(&machine->dsos);
122
123 machine__threads_init(machine);
124
125 machine->vdso_info = NULL;
126 machine->env = NULL;
127
128 machine->pid = pid;
129
130 machine->id_hdr_size = 0;
131 machine->kptr_restrict_warned = false;
132 machine->comm_exec = false;
133 machine->kernel_start = 0;
134 machine->vmlinux_map = NULL;
135
136 machine->root_dir = strdup(root_dir);
137 if (machine->root_dir == NULL)
138 goto out;
139
140 if (machine__set_mmap_name(machine))
141 goto out;
142
143 if (pid != HOST_KERNEL_ID) {
144 struct thread *thread = machine__findnew_thread(machine, -1,
145 pid);
146
147 if (thread == NULL)
148 goto out;
149
150 thread__set_guest_comm(thread, pid);
151 thread__put(thread);
152 }
153
154 machine->current_tid = NULL;
155 err = 0;
156
out:
	if (err) {
		zfree(&machine->kmaps);
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}
165
166struct machine *machine__new_host(void)
167{
168 struct machine *machine = malloc(sizeof(*machine));
169
170 if (machine != NULL) {
171 machine__init(machine, "", HOST_KERNEL_ID);
172
173 if (machine__create_kernel_maps(machine) < 0)
174 goto out_delete;
175 }
176
177 return machine;
178out_delete:
179 free(machine);
180 return NULL;
181}
182
183struct machine *machine__new_kallsyms(void)
184{
185 struct machine *machine = machine__new_host();
186 /*
187 * FIXME:
188 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
189 * ask for not using the kcore parsing code, once this one is fixed
190 * to create a map per module.
191 */
192 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
193 machine__delete(machine);
194 machine = NULL;
195 }
196
197 return machine;
198}
199
200static void dsos__purge(struct dsos *dsos)
201{
202 struct dso *pos, *n;
203
204 down_write(&dsos->lock);
205
206 list_for_each_entry_safe(pos, n, &dsos->head, node) {
207 RB_CLEAR_NODE(&pos->rb_node);
208 pos->root = NULL;
209 list_del_init(&pos->node);
210 dso__put(pos);
211 }
212
213 up_write(&dsos->lock);
214}
215
216static void dsos__exit(struct dsos *dsos)
217{
218 dsos__purge(dsos);
219 exit_rwsem(&dsos->lock);
220}
221
222void machine__delete_threads(struct machine *machine)
223{
224 struct rb_node *nd;
225 int i;
226
227 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
228 struct threads *threads = &machine->threads[i];
229 down_write(&threads->lock);
230 nd = rb_first_cached(&threads->entries);
231 while (nd) {
232 struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
233
234 nd = rb_next(nd);
235 __machine__remove_thread(machine, trb, trb->thread, false);
236 }
237 up_write(&threads->lock);
238 }
239}
240
241void machine__exit(struct machine *machine)
242{
243 int i;
244
245 if (machine == NULL)
246 return;
247
248 machine__destroy_kernel_maps(machine);
249 maps__zput(machine->kmaps);
250 dsos__exit(&machine->dsos);
251 machine__exit_vdso(machine);
252 zfree(&machine->root_dir);
253 zfree(&machine->mmap_name);
254 zfree(&machine->current_tid);
255 zfree(&machine->kallsyms_filename);
256
257 machine__delete_threads(machine);
258 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
259 struct threads *threads = &machine->threads[i];
260
261 exit_rwsem(&threads->lock);
262 }
263}
264
265void machine__delete(struct machine *machine)
266{
267 if (machine) {
268 machine__exit(machine);
269 free(machine);
270 }
271}
272
273void machines__init(struct machines *machines)
274{
275 machine__init(&machines->host, "", HOST_KERNEL_ID);
276 machines->guests = RB_ROOT_CACHED;
277}
278
279void machines__exit(struct machines *machines)
280{
281 machine__exit(&machines->host);
282 /* XXX exit guest */
283}
284
285struct machine *machines__add(struct machines *machines, pid_t pid,
286 const char *root_dir)
287{
288 struct rb_node **p = &machines->guests.rb_root.rb_node;
289 struct rb_node *parent = NULL;
290 struct machine *pos, *machine = malloc(sizeof(*machine));
291 bool leftmost = true;
292
293 if (machine == NULL)
294 return NULL;
295
296 if (machine__init(machine, root_dir, pid) != 0) {
297 free(machine);
298 return NULL;
299 }
300
301 while (*p != NULL) {
302 parent = *p;
303 pos = rb_entry(parent, struct machine, rb_node);
304 if (pid < pos->pid)
305 p = &(*p)->rb_left;
306 else {
307 p = &(*p)->rb_right;
308 leftmost = false;
309 }
310 }
311
312 rb_link_node(&machine->rb_node, parent, p);
313 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
314
315 machine->machines = machines;
316
317 return machine;
318}
319
320void machines__set_comm_exec(struct machines *machines, bool comm_exec)
321{
322 struct rb_node *nd;
323
324 machines->host.comm_exec = comm_exec;
325
326 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
327 struct machine *machine = rb_entry(nd, struct machine, rb_node);
328
329 machine->comm_exec = comm_exec;
330 }
331}
332
333struct machine *machines__find(struct machines *machines, pid_t pid)
334{
335 struct rb_node **p = &machines->guests.rb_root.rb_node;
336 struct rb_node *parent = NULL;
337 struct machine *machine;
338 struct machine *default_machine = NULL;
339
340 if (pid == HOST_KERNEL_ID)
341 return &machines->host;
342
343 while (*p != NULL) {
344 parent = *p;
345 machine = rb_entry(parent, struct machine, rb_node);
346 if (pid < machine->pid)
347 p = &(*p)->rb_left;
348 else if (pid > machine->pid)
349 p = &(*p)->rb_right;
350 else
351 return machine;
352 if (!machine->pid)
353 default_machine = machine;
354 }
355
356 return default_machine;
357}
358
359struct machine *machines__findnew(struct machines *machines, pid_t pid)
360{
361 char path[PATH_MAX];
362 const char *root_dir = "";
363 struct machine *machine = machines__find(machines, pid);
364
365 if (machine && (machine->pid == pid))
366 goto out;
367
368 if ((pid != HOST_KERNEL_ID) &&
369 (pid != DEFAULT_GUEST_KERNEL_ID) &&
370 (symbol_conf.guestmount)) {
371 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
372 if (access(path, R_OK)) {
373 static struct strlist *seen;
374
375 if (!seen)
376 seen = strlist__new(NULL, NULL);
377
378 if (!strlist__has_entry(seen, path)) {
379 pr_err("Can't access file %s\n", path);
380 strlist__add(seen, path);
381 }
382 machine = NULL;
383 goto out;
384 }
385 root_dir = path;
386 }
387
388 machine = machines__add(machines, pid, root_dir);
389out:
390 return machine;
391}
392
393struct machine *machines__find_guest(struct machines *machines, pid_t pid)
394{
395 struct machine *machine = machines__find(machines, pid);
396
397 if (!machine)
398 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
399 return machine;
400}
401
402/*
403 * A common case for KVM test programs is that the test program acts as the
404 * hypervisor, creating, running and destroying the virtual machine, and
405 * providing the guest object code from its own object code. In this case,
406 * the VM is not running an OS, but only the functions loaded into it by the
407 * hypervisor test program, and conveniently, loaded at the same virtual
408 * addresses.
409 *
410 * Normally to resolve addresses, MMAP events are needed to map addresses
411 * back to the object code and debug symbols for that object code.
412 *
413 * Currently, there is no way to get such mapping information from guests
414 * but, in the scenario described above, the guest has the same mappings
415 * as the hypervisor, so support for that scenario can be achieved.
416 *
417 * To support that, copy the host thread's maps to the guest thread's maps.
418 * Note, we do not discover the guest until we encounter a guest event,
419 * which works well because it is not until then that we know that the host
420 * thread's maps have been set up.
421 *
422 * This function returns the guest thread. Apart from keeping the data
423 * structures sane, using a thread belonging to the guest machine, instead
424 * of the host thread, allows it to have its own comm (refer
425 * thread__set_guest_comm()).
426 */
427static struct thread *findnew_guest_code(struct machine *machine,
428 struct machine *host_machine,
429 pid_t pid)
430{
431 struct thread *host_thread;
432 struct thread *thread;
433 int err;
434
435 if (!machine)
436 return NULL;
437
438 thread = machine__findnew_thread(machine, -1, pid);
439 if (!thread)
440 return NULL;
441
442 /* Assume maps are set up if there are any */
443 if (maps__nr_maps(thread__maps(thread)))
444 return thread;
445
446 host_thread = machine__find_thread(host_machine, -1, pid);
447 if (!host_thread)
448 goto out_err;
449
450 thread__set_guest_comm(thread, pid);
451
452 /*
453 * Guest code can be found in hypervisor process at the same address
454 * so copy host maps.
455 */
456 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
457 thread__put(host_thread);
458 if (err)
459 goto out_err;
460
461 return thread;
462
463out_err:
464 thread__zput(thread);
465 return NULL;
466}
467
468struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
469{
470 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
471 struct machine *machine = machines__findnew(machines, pid);
472
473 return findnew_guest_code(machine, host_machine, pid);
474}
475
476struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
477{
478 struct machines *machines = machine->machines;
479 struct machine *host_machine;
480
481 if (!machines)
482 return NULL;
483
484 host_machine = machines__find(machines, HOST_KERNEL_ID);
485
486 return findnew_guest_code(machine, host_machine, pid);
487}
488
489void machines__process_guests(struct machines *machines,
490 machine__process_t process, void *data)
491{
492 struct rb_node *nd;
493
494 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
495 struct machine *pos = rb_entry(nd, struct machine, rb_node);
496 process(pos, data);
497 }
498}
499
500void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
501{
502 struct rb_node *node;
503 struct machine *machine;
504
505 machines->host.id_hdr_size = id_hdr_size;
506
507 for (node = rb_first_cached(&machines->guests); node;
508 node = rb_next(node)) {
509 machine = rb_entry(node, struct machine, rb_node);
510 machine->id_hdr_size = id_hdr_size;
511 }
512
513 return;
514}
515
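/*
 * A thread may have been created from an event that only carried a tid.
 * Once the pid becomes known, record it and, unless the thread is the group
 * leader itself, share the leader's maps so the whole process uses a single
 * address space.
 */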
516static void machine__update_thread_pid(struct machine *machine,
517 struct thread *th, pid_t pid)
518{
519 struct thread *leader;
520
521 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
522 return;
523
524 thread__set_pid(th, pid);
525
526 if (thread__pid(th) == thread__tid(th))
527 return;
528
529 leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
530 if (!leader)
531 goto out_err;
532
533 if (!thread__maps(leader))
534 thread__set_maps(leader, maps__new(machine));
535
536 if (!thread__maps(leader))
537 goto out_err;
538
539 if (thread__maps(th) == thread__maps(leader))
540 goto out_put;
541
542 if (thread__maps(th)) {
543 /*
544 * Maps are created from MMAP events which provide the pid and
545 * tid. Consequently there never should be any maps on a thread
546 * with an unknown pid. Just print an error if there are.
547 */
548 if (!maps__empty(thread__maps(th)))
549 pr_err("Discarding thread maps for %d:%d\n",
550 thread__pid(th), thread__tid(th));
551 maps__put(thread__maps(th));
552 }
553
554 thread__set_maps(th, maps__get(thread__maps(leader)));
555out_put:
556 thread__put(leader);
557 return;
558out_err:
559 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
560 goto out_put;
561}
562
/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
568static struct thread*
569__threads__get_last_match(struct threads *threads, struct machine *machine,
570 int pid, int tid)
571{
572 struct thread *th;
573
574 th = threads->last_match;
575 if (th != NULL) {
576 if (thread__tid(th) == tid) {
577 machine__update_thread_pid(machine, th, pid);
578 return thread__get(th);
579 }
580 thread__put(threads->last_match);
581 threads->last_match = NULL;
582 }
583
584 return NULL;
585}
586
587static struct thread*
588threads__get_last_match(struct threads *threads, struct machine *machine,
589 int pid, int tid)
590{
591 struct thread *th = NULL;
592
593 if (perf_singlethreaded)
594 th = __threads__get_last_match(threads, machine, pid, tid);
595
596 return th;
597}
598
599static void
600__threads__set_last_match(struct threads *threads, struct thread *th)
601{
602 thread__put(threads->last_match);
603 threads->last_match = thread__get(th);
604}
605
606static void
607threads__set_last_match(struct threads *threads, struct thread *th)
608{
609 if (perf_singlethreaded)
610 __threads__set_last_match(threads, th);
611}
612
/*
 * The caller must eventually drop the thread->refcnt obtained from a
 * successful lookup or from a newly inserted thread.
 */
617static struct thread *____machine__findnew_thread(struct machine *machine,
618 struct threads *threads,
619 pid_t pid, pid_t tid,
620 bool create)
621{
622 struct rb_node **p = &threads->entries.rb_root.rb_node;
623 struct rb_node *parent = NULL;
624 struct thread *th;
625 struct thread_rb_node *nd;
626 bool leftmost = true;
627
628 th = threads__get_last_match(threads, machine, pid, tid);
629 if (th)
630 return th;
631
632 while (*p != NULL) {
633 parent = *p;
634 th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
635
636 if (thread__tid(th) == tid) {
637 threads__set_last_match(threads, th);
638 machine__update_thread_pid(machine, th, pid);
639 return thread__get(th);
640 }
641
642 if (tid < thread__tid(th))
643 p = &(*p)->rb_left;
644 else {
645 p = &(*p)->rb_right;
646 leftmost = false;
647 }
648 }
649
650 if (!create)
651 return NULL;
652
653 th = thread__new(pid, tid);
654 if (th == NULL)
655 return NULL;
656
657 nd = malloc(sizeof(*nd));
658 if (nd == NULL) {
659 thread__put(th);
660 return NULL;
661 }
662 nd->thread = th;
663
664 rb_link_node(&nd->rb_node, parent, p);
665 rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
	/*
	 * We have to initialize maps separately after the rb tree is updated.
	 *
	 * The reason is that we call machine__findnew_thread within
	 * thread__init_maps to find the thread leader and that would screw up
	 * the rb tree.
	 */
673 if (thread__init_maps(th, machine)) {
674 pr_err("Thread init failed thread %d\n", pid);
675 rb_erase_cached(&nd->rb_node, &threads->entries);
676 RB_CLEAR_NODE(&nd->rb_node);
677 free(nd);
678 thread__put(th);
679 return NULL;
680 }
681 /*
682 * It is now in the rbtree, get a ref
683 */
684 threads__set_last_match(threads, th);
685 ++threads->nr;
686
687 return thread__get(th);
688}
689
690struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
691{
692 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
693}
694
695struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
696 pid_t tid)
697{
698 struct threads *threads = machine__threads(machine, tid);
699 struct thread *th;
700
701 down_write(&threads->lock);
702 th = __machine__findnew_thread(machine, pid, tid);
703 up_write(&threads->lock);
704 return th;
705}
706
707struct thread *machine__find_thread(struct machine *machine, pid_t pid,
708 pid_t tid)
709{
710 struct threads *threads = machine__threads(machine, tid);
711 struct thread *th;
712
713 down_read(&threads->lock);
714 th = ____machine__findnew_thread(machine, threads, pid, tid, false);
715 up_read(&threads->lock);
716 return th;
717}
718
719/*
720 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
721 * So here a single thread is created for that, but actually there is a separate
722 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
723 * is only 1. That causes problems for some tools, requiring workarounds. For
724 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
725 */
726struct thread *machine__idle_thread(struct machine *machine)
727{
728 struct thread *thread = machine__findnew_thread(machine, 0, 0);
729
730 if (!thread || thread__set_comm(thread, "swapper", 0) ||
731 thread__set_namespaces(thread, 0, NULL))
732 pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
733
734 return thread;
735}
736
737struct comm *machine__thread_exec_comm(struct machine *machine,
738 struct thread *thread)
739{
740 if (machine->comm_exec)
741 return thread__exec_comm(thread);
742 else
743 return thread__comm(thread);
744}
745
746int machine__process_comm_event(struct machine *machine, union perf_event *event,
747 struct perf_sample *sample)
748{
749 struct thread *thread = machine__findnew_thread(machine,
750 event->comm.pid,
751 event->comm.tid);
752 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
753 int err = 0;
754
755 if (exec)
756 machine->comm_exec = true;
757
758 if (dump_trace)
759 perf_event__fprintf_comm(event, stdout);
760
761 if (thread == NULL ||
762 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
763 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
764 err = -1;
765 }
766
767 thread__put(thread);
768
769 return err;
770}
771
772int machine__process_namespaces_event(struct machine *machine __maybe_unused,
773 union perf_event *event,
774 struct perf_sample *sample __maybe_unused)
775{
776 struct thread *thread = machine__findnew_thread(machine,
777 event->namespaces.pid,
778 event->namespaces.tid);
779 int err = 0;
780
781 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
782 "\nWARNING: kernel seems to support more namespaces than perf"
783 " tool.\nTry updating the perf tool..\n\n");
784
785 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
786 "\nWARNING: perf tool seems to support more namespaces than"
787 " the kernel.\nTry updating the kernel..\n\n");
788
789 if (dump_trace)
790 perf_event__fprintf_namespaces(event, stdout);
791
792 if (thread == NULL ||
793 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
794 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
795 err = -1;
796 }
797
798 thread__put(thread);
799
800 return err;
801}
802
803int machine__process_cgroup_event(struct machine *machine,
804 union perf_event *event,
805 struct perf_sample *sample __maybe_unused)
806{
807 struct cgroup *cgrp;
808
809 if (dump_trace)
810 perf_event__fprintf_cgroup(event, stdout);
811
812 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
813 if (cgrp == NULL)
814 return -ENOMEM;
815
816 return 0;
817}
818
819int machine__process_lost_event(struct machine *machine __maybe_unused,
820 union perf_event *event, struct perf_sample *sample __maybe_unused)
821{
822 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
823 event->lost.id, event->lost.lost);
824 return 0;
825}
826
827int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
828 union perf_event *event, struct perf_sample *sample)
829{
830 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
831 sample->id, event->lost_samples.lost);
832 return 0;
833}
834
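/*
 * Find or create the dso for a kernel module under machine->dsos.lock.
 * The returned dso carries an extra reference; the caller is expected to
 * drop it with dso__put() (see machine__addnew_module_map()).
 */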
835static struct dso *machine__findnew_module_dso(struct machine *machine,
836 struct kmod_path *m,
837 const char *filename)
838{
839 struct dso *dso;
840
841 down_write(&machine->dsos.lock);
842
843 dso = __dsos__find(&machine->dsos, m->name, true);
844 if (!dso) {
845 dso = __dsos__addnew(&machine->dsos, m->name);
846 if (dso == NULL)
847 goto out_unlock;
848
849 dso__set_module_info(dso, m, machine);
850 dso__set_long_name(dso, strdup(filename), true);
851 dso->kernel = DSO_SPACE__KERNEL;
852 }
853
854 dso__get(dso);
855out_unlock:
856 up_write(&machine->dsos.lock);
857 return dso;
858}
859
860int machine__process_aux_event(struct machine *machine __maybe_unused,
861 union perf_event *event)
862{
863 if (dump_trace)
864 perf_event__fprintf_aux(event, stdout);
865 return 0;
866}
867
868int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
869 union perf_event *event)
870{
871 if (dump_trace)
872 perf_event__fprintf_itrace_start(event, stdout);
873 return 0;
874}
875
876int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
877 union perf_event *event)
878{
879 if (dump_trace)
880 perf_event__fprintf_aux_output_hw_id(event, stdout);
881 return 0;
882}
883
884int machine__process_switch_event(struct machine *machine __maybe_unused,
885 union perf_event *event)
886{
887 if (dump_trace)
888 perf_event__fprintf_switch(event, stdout);
889 return 0;
890}
891
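/*
 * PERF_RECORD_KSYMBOL (register): if no kernel map covers the address yet,
 * create a dso and map for it (e.g. BPF programs or other out-of-line code)
 * and then add the ksymbol itself to that dso's symbol table.
 */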
892static int machine__process_ksymbol_register(struct machine *machine,
893 union perf_event *event,
894 struct perf_sample *sample __maybe_unused)
895{
896 struct symbol *sym;
897 struct dso *dso;
898 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
899 bool put_map = false;
900 int err = 0;
901
902 if (!map) {
903 dso = dso__new(event->ksymbol.name);
904
905 if (!dso) {
906 err = -ENOMEM;
907 goto out;
908 }
909 dso->kernel = DSO_SPACE__KERNEL;
910 map = map__new2(0, dso);
911 dso__put(dso);
912 if (!map) {
913 err = -ENOMEM;
914 goto out;
915 }
916 /*
917 * The inserted map has a get on it, we need to put to release
918 * the reference count here, but do it after all accesses are
919 * done.
920 */
921 put_map = true;
922 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
923 dso->binary_type = DSO_BINARY_TYPE__OOL;
924 dso->data.file_size = event->ksymbol.len;
925 dso__set_loaded(dso);
926 }
927
928 map__set_start(map, event->ksymbol.addr);
929 map__set_end(map, map__start(map) + event->ksymbol.len);
930 err = maps__insert(machine__kernel_maps(machine), map);
931 if (err) {
932 err = -ENOMEM;
933 goto out;
934 }
935
936 dso__set_loaded(dso);
937
938 if (is_bpf_image(event->ksymbol.name)) {
939 dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
940 dso__set_long_name(dso, "", false);
941 }
942 } else {
943 dso = map__dso(map);
944 }
945
946 sym = symbol__new(map__map_ip(map, map__start(map)),
947 event->ksymbol.len,
948 0, 0, event->ksymbol.name);
949 if (!sym) {
950 err = -ENOMEM;
951 goto out;
952 }
953 dso__insert_symbol(dso, sym);
954out:
955 if (put_map)
956 map__put(map);
957 return err;
958}
959
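/*
 * PERF_RECORD_KSYMBOL (unregister): drop the whole map unless the symbol
 * lives in the main vmlinux map, in which case only the symbol is deleted.
 */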
960static int machine__process_ksymbol_unregister(struct machine *machine,
961 union perf_event *event,
962 struct perf_sample *sample __maybe_unused)
963{
964 struct symbol *sym;
965 struct map *map;
966
967 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
968 if (!map)
969 return 0;
970
971 if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
972 maps__remove(machine__kernel_maps(machine), map);
973 else {
974 struct dso *dso = map__dso(map);
975
976 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
977 if (sym)
978 dso__delete_symbol(dso, sym);
979 }
980
981 return 0;
982}
983
984int machine__process_ksymbol(struct machine *machine __maybe_unused,
985 union perf_event *event,
986 struct perf_sample *sample)
987{
988 if (dump_trace)
989 perf_event__fprintf_ksymbol(event, stdout);
990
991 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
992 return machine__process_ksymbol_unregister(machine, event,
993 sample);
994 return machine__process_ksymbol_register(machine, event, sample);
995}
996
997int machine__process_text_poke(struct machine *machine, union perf_event *event,
998 struct perf_sample *sample __maybe_unused)
999{
1000 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
1001 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1002 struct dso *dso = map ? map__dso(map) : NULL;
1003
1004 if (dump_trace)
1005 perf_event__fprintf_text_poke(event, machine, stdout);
1006
1007 if (!event->text_poke.new_len)
1008 return 0;
1009
1010 if (cpumode != PERF_RECORD_MISC_KERNEL) {
1011 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
1012 return 0;
1013 }
1014
1015 if (dso) {
1016 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
1017 int ret;
1018
1019 /*
1020 * Kernel maps might be changed when loading symbols so loading
1021 * must be done prior to using kernel maps.
1022 */
1023 map__load(map);
1024 ret = dso__data_write_cache_addr(dso, map, machine,
1025 event->text_poke.addr,
1026 new_bytes,
1027 event->text_poke.new_len);
1028 if (ret != event->text_poke.new_len)
1029 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
1030 event->text_poke.addr);
1031 } else {
1032 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
1033 event->text_poke.addr);
1034 }
1035
1036 return 0;
1037}
1038
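/*
 * Create a map for a kernel module starting at 'start', backed by the
 * module's dso, and insert it into the kernel maps. Returns NULL on failure.
 */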
1039static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
1040 const char *filename)
1041{
1042 struct map *map = NULL;
1043 struct kmod_path m;
1044 struct dso *dso;
1045 int err;
1046
1047 if (kmod_path__parse_name(&m, filename))
1048 return NULL;
1049
1050 dso = machine__findnew_module_dso(machine, &m, filename);
1051 if (dso == NULL)
1052 goto out;
1053
1054 map = map__new2(start, dso);
1055 if (map == NULL)
1056 goto out;
1057
1058 err = maps__insert(machine__kernel_maps(machine), map);
1059 /* If maps__insert failed, return NULL. */
1060 if (err) {
1061 map__put(map);
1062 map = NULL;
1063 }
1064out:
1065 /* put the dso here, corresponding to machine__findnew_module_dso */
1066 dso__put(dso);
1067 zfree(&m.name);
1068 return map;
1069}
1070
1071size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
1072{
1073 struct rb_node *nd;
1074 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
1075
1076 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1077 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1078 ret += __dsos__fprintf(&pos->dsos.head, fp);
1079 }
1080
1081 return ret;
1082}
1083
1084size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
1085 bool (skip)(struct dso *dso, int parm), int parm)
1086{
1087 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
1088}
1089
1090size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
1091 bool (skip)(struct dso *dso, int parm), int parm)
1092{
1093 struct rb_node *nd;
1094 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
1095
1096 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1097 struct machine *pos = rb_entry(nd, struct machine, rb_node);
1098 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
1099 }
1100 return ret;
1101}
1102
1103size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1104{
1105 int i;
1106 size_t printed = 0;
1107 struct dso *kdso = machine__kernel_dso(machine);
1108
1109 if (kdso->has_build_id) {
1110 char filename[PATH_MAX];
1111 if (dso__build_id_filename(kdso, filename, sizeof(filename),
1112 false))
1113 printed += fprintf(fp, "[0] %s\n", filename);
1114 }
1115
1116 for (i = 0; i < vmlinux_path__nr_entries; ++i)
1117 printed += fprintf(fp, "[%d] %s\n",
1118 i + kdso->has_build_id, vmlinux_path[i]);
1119
1120 return printed;
1121}
1122
1123size_t machine__fprintf(struct machine *machine, FILE *fp)
1124{
1125 struct rb_node *nd;
1126 size_t ret;
1127 int i;
1128
1129 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
1130 struct threads *threads = &machine->threads[i];
1131
1132 down_read(&threads->lock);
1133
1134 ret = fprintf(fp, "Threads: %u\n", threads->nr);
1135
1136 for (nd = rb_first_cached(&threads->entries); nd;
1137 nd = rb_next(nd)) {
1138 struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
1139
1140 ret += thread__fprintf(pos, fp);
1141 }
1142
1143 up_read(&threads->lock);
1144 }
1145 return ret;
1146}
1147
1148static struct dso *machine__get_kernel(struct machine *machine)
1149{
1150 const char *vmlinux_name = machine->mmap_name;
1151 struct dso *kernel;
1152
1153 if (machine__is_host(machine)) {
1154 if (symbol_conf.vmlinux_name)
1155 vmlinux_name = symbol_conf.vmlinux_name;
1156
1157 kernel = machine__findnew_kernel(machine, vmlinux_name,
1158 "[kernel]", DSO_SPACE__KERNEL);
1159 } else {
1160 if (symbol_conf.default_guest_vmlinux_name)
1161 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1162
1163 kernel = machine__findnew_kernel(machine, vmlinux_name,
1164 "[guest.kernel]",
1165 DSO_SPACE__KERNEL_GUEST);
1166 }
1167
1168 if (kernel != NULL && (!kernel->has_build_id))
1169 dso__read_running_kernel_build_id(kernel, machine);
1170
1171 return kernel;
1172}
1173
1174void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1175 size_t bufsz)
1176{
1177 if (machine__is_default_guest(machine))
1178 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1179 else
1180 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1181}
1182
1183const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1184
1185/* Figure out the start address of kernel map from /proc/kallsyms.
1186 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1187 * symbol_name if it's not that important.
1188 */
1189static int machine__get_running_kernel_start(struct machine *machine,
1190 const char **symbol_name,
1191 u64 *start, u64 *end)
1192{
1193 char filename[PATH_MAX];
1194 int i, err = -1;
1195 const char *name;
1196 u64 addr = 0;
1197
1198 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1199
1200 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1201 return 0;
1202
1203 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1204 err = kallsyms__get_function_start(filename, name, &addr);
1205 if (!err)
1206 break;
1207 }
1208
1209 if (err)
1210 return -1;
1211
1212 if (symbol_name)
1213 *symbol_name = name;
1214
1215 *start = addr;
1216
1217 err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1218 if (err)
1219 err = kallsyms__get_function_start(filename, "_etext", &addr);
1220 if (!err)
1221 *end = addr;
1222
1223 return 0;
1224}
1225
1226int machine__create_extra_kernel_map(struct machine *machine,
1227 struct dso *kernel,
1228 struct extra_kernel_map *xm)
1229{
1230 struct kmap *kmap;
1231 struct map *map;
1232 int err;
1233
1234 map = map__new2(xm->start, kernel);
1235 if (!map)
1236 return -ENOMEM;
1237
1238 map__set_end(map, xm->end);
1239 map__set_pgoff(map, xm->pgoff);
1240
1241 kmap = map__kmap(map);
1242
1243 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1244
1245 err = maps__insert(machine__kernel_maps(machine), map);
1246
1247 if (!err) {
1248 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1249 kmap->name, map__start(map), map__end(map));
1250 }
1251
1252 map__put(map);
1253
1254 return err;
1255}
1256
1257static u64 find_entry_trampoline(struct dso *dso)
1258{
1259 /* Duplicates are removed so lookup all aliases */
1260 const char *syms[] = {
1261 "_entry_trampoline",
1262 "__entry_trampoline_start",
1263 "entry_SYSCALL_64_trampoline",
1264 };
1265 struct symbol *sym = dso__first_symbol(dso);
1266 unsigned int i;
1267
1268 for (; sym; sym = dso__next_symbol(sym)) {
1269 if (sym->binding != STB_GLOBAL)
1270 continue;
1271 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1272 if (!strcmp(sym->name, syms[i]))
1273 return sym->start;
1274 }
1275 }
1276
1277 return 0;
1278}
1279
1280/*
1281 * These values can be used for kernels that do not have symbols for the entry
1282 * trampolines in kallsyms.
1283 */
1284#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1285#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1286#define X86_64_ENTRY_TRAMPOLINE 0x6000
1287
1288struct machine__map_x86_64_entry_trampolines_args {
1289 struct maps *kmaps;
1290 bool found;
1291};
1292
1293static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1294{
1295 struct machine__map_x86_64_entry_trampolines_args *args = data;
1296 struct map *dest_map;
1297 struct kmap *kmap = __map__kmap(map);
1298
1299 if (!kmap || !is_entry_trampoline(kmap->name))
1300 return 0;
1301
1302 dest_map = maps__find(args->kmaps, map__pgoff(map));
1303 if (dest_map != map)
1304 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1305
1306 args->found = true;
1307 return 0;
1308}
1309
1310/* Map x86_64 PTI entry trampolines */
1311int machine__map_x86_64_entry_trampolines(struct machine *machine,
1312 struct dso *kernel)
1313{
1314 struct machine__map_x86_64_entry_trampolines_args args = {
1315 .kmaps = machine__kernel_maps(machine),
1316 .found = false,
1317 };
1318 int nr_cpus_avail, cpu;
1319 u64 pgoff;
1320
1321 /*
1322 * In the vmlinux case, pgoff is a virtual address which must now be
1323 * mapped to a vmlinux offset.
1324 */
1325 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1326
1327 if (args.found || machine->trampolines_mapped)
1328 return 0;
1329
1330 pgoff = find_entry_trampoline(kernel);
1331 if (!pgoff)
1332 return 0;
1333
1334 nr_cpus_avail = machine__nr_cpus_avail(machine);
1335
1336 /* Add a 1 page map for each CPU's entry trampoline */
1337 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1338 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1339 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1340 X86_64_ENTRY_TRAMPOLINE;
1341 struct extra_kernel_map xm = {
1342 .start = va,
1343 .end = va + page_size,
1344 .pgoff = pgoff,
1345 };
1346
1347 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1348
1349 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1350 return -1;
1351 }
1352
1353 machine->trampolines_mapped = nr_cpus_avail;
1354
1355 return 0;
1356}
1357
1358int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1359 struct dso *kernel __maybe_unused)
1360{
1361 return 0;
1362}
1363
1364static int
1365__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1366{
	/* In case of renewing the kernel map, destroy the previous one */
1368 machine__destroy_kernel_maps(machine);
1369
1370 map__put(machine->vmlinux_map);
1371 machine->vmlinux_map = map__new2(0, kernel);
1372 if (machine->vmlinux_map == NULL)
1373 return -ENOMEM;
1374
1375 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1376 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1377}
1378
1379void machine__destroy_kernel_maps(struct machine *machine)
1380{
1381 struct kmap *kmap;
1382 struct map *map = machine__kernel_map(machine);
1383
1384 if (map == NULL)
1385 return;
1386
1387 kmap = map__kmap(map);
1388 maps__remove(machine__kernel_maps(machine), map);
1389 if (kmap && kmap->ref_reloc_sym) {
1390 zfree((char **)&kmap->ref_reloc_sym->name);
1391 zfree(&kmap->ref_reloc_sym);
1392 }
1393
1394 map__zput(machine->vmlinux_map);
1395}
1396
1397int machines__create_guest_kernel_maps(struct machines *machines)
1398{
1399 int ret = 0;
1400 struct dirent **namelist = NULL;
1401 int i, items = 0;
1402 char path[PATH_MAX];
1403 pid_t pid;
1404 char *endp;
1405
1406 if (symbol_conf.default_guest_vmlinux_name ||
1407 symbol_conf.default_guest_modules ||
1408 symbol_conf.default_guest_kallsyms) {
1409 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1410 }
1411
1412 if (symbol_conf.guestmount) {
1413 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1414 if (items <= 0)
1415 return -ENOENT;
1416 for (i = 0; i < items; i++) {
1417 if (!isdigit(namelist[i]->d_name[0])) {
1418 /* Filter out . and .. */
1419 continue;
1420 }
1421 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1422 if ((*endp != '\0') ||
1423 (endp == namelist[i]->d_name) ||
1424 (errno == ERANGE)) {
1425 pr_debug("invalid directory (%s). Skipping.\n",
1426 namelist[i]->d_name);
1427 continue;
1428 }
1429 sprintf(path, "%s/%s/proc/kallsyms",
1430 symbol_conf.guestmount,
1431 namelist[i]->d_name);
1432 ret = access(path, R_OK);
1433 if (ret) {
1434 pr_debug("Can't access file %s\n", path);
1435 goto failure;
1436 }
1437 machines__create_kernel_maps(machines, pid);
1438 }
1439failure:
1440 free(namelist);
1441 }
1442
1443 return ret;
1444}
1445
1446void machines__destroy_kernel_maps(struct machines *machines)
1447{
1448 struct rb_node *next = rb_first_cached(&machines->guests);
1449
1450 machine__destroy_kernel_maps(&machines->host);
1451
1452 while (next) {
1453 struct machine *pos = rb_entry(next, struct machine, rb_node);
1454
1455 next = rb_next(&pos->rb_node);
1456 rb_erase_cached(&pos->rb_node, &machines->guests);
1457 machine__delete(pos);
1458 }
1459}
1460
1461int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1462{
1463 struct machine *machine = machines__findnew(machines, pid);
1464
1465 if (machine == NULL)
1466 return -1;
1467
1468 return machine__create_kernel_maps(machine);
1469}
1470
1471int machine__load_kallsyms(struct machine *machine, const char *filename)
1472{
1473 struct map *map = machine__kernel_map(machine);
1474 struct dso *dso = map__dso(map);
1475 int ret = __dso__load_kallsyms(dso, filename, map, true);
1476
1477 if (ret > 0) {
1478 dso__set_loaded(dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
1484 maps__fixup_end(machine__kernel_maps(machine));
1485 }
1486
1487 return ret;
1488}
1489
1490int machine__load_vmlinux_path(struct machine *machine)
1491{
1492 struct map *map = machine__kernel_map(machine);
1493 struct dso *dso = map__dso(map);
1494 int ret = dso__load_vmlinux_path(dso, map);
1495
1496 if (ret > 0)
1497 dso__set_loaded(dso);
1498
1499 return ret;
1500}
1501
1502static char *get_kernel_version(const char *root_dir)
1503{
1504 char version[PATH_MAX];
1505 FILE *file;
1506 char *name, *tmp;
1507 const char *prefix = "Linux version ";
1508
1509 sprintf(version, "%s/proc/version", root_dir);
1510 file = fopen(version, "r");
1511 if (!file)
1512 return NULL;
1513
1514 tmp = fgets(version, sizeof(version), file);
1515 fclose(file);
1516 if (!tmp)
1517 return NULL;
1518
1519 name = strstr(version, prefix);
1520 if (!name)
1521 return NULL;
1522 name += strlen(prefix);
1523 tmp = strchr(name, ' ');
1524 if (tmp)
1525 *tmp = '\0';
1526
1527 return strdup(name);
1528}
1529
1530static bool is_kmod_dso(struct dso *dso)
1531{
1532 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1533 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1534}
1535
1536static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1537{
1538 char *long_name;
1539 struct dso *dso;
1540 struct map *map = maps__find_by_name(maps, m->name);
1541
1542 if (map == NULL)
1543 return 0;
1544
1545 long_name = strdup(path);
1546 if (long_name == NULL)
1547 return -ENOMEM;
1548
1549 dso = map__dso(map);
1550 dso__set_long_name(dso, long_name, true);
1551 dso__kernel_module_get_build_id(dso, "");
1552
	/*
	 * The full name can reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
1557 if (m->comp && is_kmod_dso(dso)) {
1558 dso->symtab_type++;
1559 dso->comp = m->comp;
1560 }
1561
1562 return 0;
1563}
1564
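/*
 * Recursively walk a /lib/modules/<version> style directory and point each
 * module dso at the .ko path found on disk; 'depth' lets the top-level
 * "source" and "build" symlinks be skipped.
 */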
1565static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1566{
1567 struct dirent *dent;
1568 DIR *dir = opendir(dir_name);
1569 int ret = 0;
1570
1571 if (!dir) {
1572 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1573 return -1;
1574 }
1575
1576 while ((dent = readdir(dir)) != NULL) {
1577 char path[PATH_MAX];
1578 struct stat st;
1579
		/* sshfs might return bad dent->d_type, so we have to stat */
1581 path__join(path, sizeof(path), dir_name, dent->d_name);
1582 if (stat(path, &st))
1583 continue;
1584
1585 if (S_ISDIR(st.st_mode)) {
1586 if (!strcmp(dent->d_name, ".") ||
1587 !strcmp(dent->d_name, ".."))
1588 continue;
1589
1590 /* Do not follow top-level source and build symlinks */
1591 if (depth == 0) {
1592 if (!strcmp(dent->d_name, "source") ||
1593 !strcmp(dent->d_name, "build"))
1594 continue;
1595 }
1596
1597 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1598 if (ret < 0)
1599 goto out;
1600 } else {
1601 struct kmod_path m;
1602
1603 ret = kmod_path__parse_name(&m, dent->d_name);
1604 if (ret)
1605 goto out;
1606
1607 if (m.kmod)
1608 ret = maps__set_module_path(maps, path, &m);
1609
1610 zfree(&m.name);
1611
1612 if (ret)
1613 goto out;
1614 }
1615 }
1616
1617out:
1618 closedir(dir);
1619 return ret;
1620}
1621
1622static int machine__set_modules_path(struct machine *machine)
1623{
1624 char *version;
1625 char modules_path[PATH_MAX];
1626
1627 version = get_kernel_version(machine->root_dir);
1628 if (!version)
1629 return -1;
1630
1631 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1632 machine->root_dir, version);
1633 free(version);
1634
1635 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1636}
1637int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1638 u64 *size __maybe_unused,
1639 const char *name __maybe_unused)
1640{
1641 return 0;
1642}
1643
1644static int machine__create_module(void *arg, const char *name, u64 start,
1645 u64 size)
1646{
1647 struct machine *machine = arg;
1648 struct map *map;
1649
1650 if (arch__fix_module_text_start(&start, &size, name) < 0)
1651 return -1;
1652
1653 map = machine__addnew_module_map(machine, start, name);
1654 if (map == NULL)
1655 return -1;
1656 map__set_end(map, start + size);
1657
1658 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1659 map__put(map);
1660 return 0;
1661}
1662
1663static int machine__create_modules(struct machine *machine)
1664{
1665 const char *modules;
1666 char path[PATH_MAX];
1667
1668 if (machine__is_default_guest(machine)) {
1669 modules = symbol_conf.default_guest_modules;
1670 } else {
1671 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1672 modules = path;
1673 }
1674
1675 if (symbol__restricted_filename(modules, "/proc/modules"))
1676 return -1;
1677
1678 if (modules__parse(modules, machine, machine__create_module))
1679 return -1;
1680
1681 if (!machine__set_modules_path(machine))
1682 return 0;
1683
1684 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1685
1686 return 0;
1687}
1688
1689static void machine__set_kernel_mmap(struct machine *machine,
1690 u64 start, u64 end)
1691{
1692 map__set_start(machine->vmlinux_map, start);
1693 map__set_end(machine->vmlinux_map, end);
1694 /*
1695 * Be a bit paranoid here, some perf.data file came with
1696 * a zero sized synthesized MMAP event for the kernel.
1697 */
1698 if (start == 0 && end == 0)
1699 map__set_end(machine->vmlinux_map, ~0ULL);
1700}
1701
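/*
 * Resize the kernel map once its real start/end are known: remove it from
 * the kernel maps and re-insert it so the maps tree stays sorted by the new
 * start address.
 */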
1702static int machine__update_kernel_mmap(struct machine *machine,
1703 u64 start, u64 end)
1704{
1705 struct map *orig, *updated;
1706 int err;
1707
1708 orig = machine->vmlinux_map;
1709 updated = map__get(orig);
1710
1711 machine->vmlinux_map = updated;
1712 machine__set_kernel_mmap(machine, start, end);
1713 maps__remove(machine__kernel_maps(machine), orig);
1714 err = maps__insert(machine__kernel_maps(machine), updated);
1715 map__put(orig);
1716
1717 return err;
1718}
1719
1720int machine__create_kernel_maps(struct machine *machine)
1721{
1722 struct dso *kernel = machine__get_kernel(machine);
1723 const char *name = NULL;
1724 u64 start = 0, end = ~0ULL;
1725 int ret;
1726
1727 if (kernel == NULL)
1728 return -1;
1729
1730 ret = __machine__create_kernel_maps(machine, kernel);
1731 if (ret < 0)
1732 goto out_put;
1733
1734 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1735 if (machine__is_host(machine))
1736 pr_debug("Problems creating module maps, "
1737 "continuing anyway...\n");
1738 else
1739 pr_debug("Problems creating module maps for guest %d, "
1740 "continuing anyway...\n", machine->pid);
1741 }
1742
1743 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1744 if (name &&
1745 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1746 machine__destroy_kernel_maps(machine);
1747 ret = -1;
1748 goto out_put;
1749 }
1750
		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume it's the last in the kmaps.
		 */
1755 ret = machine__update_kernel_mmap(machine, start, end);
1756 if (ret < 0)
1757 goto out_put;
1758 }
1759
1760 if (machine__create_extra_kernel_maps(machine, kernel))
1761 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1762
1763 if (end == ~0ULL) {
1764 /* update end address of the kernel map using adjacent module address */
1765 struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1766 machine__kernel_map(machine));
1767
1768 if (next)
1769 machine__set_kernel_mmap(machine, start, map__start(next));
1770 }
1771
1772out_put:
1773 dso__put(kernel);
1774 return ret;
1775}
1776
1777static bool machine__uses_kcore(struct machine *machine)
1778{
1779 struct dso *dso;
1780
1781 list_for_each_entry(dso, &machine->dsos.head, node) {
1782 if (dso__is_kcore(dso))
1783 return true;
1784 }
1785
1786 return false;
1787}
1788
1789static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1790 struct extra_kernel_map *xm)
1791{
1792 return machine__is(machine, "x86_64") &&
1793 is_entry_trampoline(xm->name);
1794}
1795
1796static int machine__process_extra_kernel_map(struct machine *machine,
1797 struct extra_kernel_map *xm)
1798{
1799 struct dso *kernel = machine__kernel_dso(machine);
1800
1801 if (kernel == NULL)
1802 return -1;
1803
1804 return machine__create_extra_kernel_map(machine, kernel, xm);
1805}
1806
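/*
 * Handle a kernel-space mmap event: paths and non-kernel '[...]' names become
 * module maps, while the main kernel mmap (re)creates the vmlinux map,
 * records the ref reloc symbol and, for the default guest, preloads the
 * guest kernel dso.
 */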
1807static int machine__process_kernel_mmap_event(struct machine *machine,
1808 struct extra_kernel_map *xm,
1809 struct build_id *bid)
1810{
1811 enum dso_space_type dso_space;
1812 bool is_kernel_mmap;
1813 const char *mmap_name = machine->mmap_name;
1814
1815 /* If we have maps from kcore then we do not need or want any others */
1816 if (machine__uses_kcore(machine))
1817 return 0;
1818
1819 if (machine__is_host(machine))
1820 dso_space = DSO_SPACE__KERNEL;
1821 else
1822 dso_space = DSO_SPACE__KERNEL_GUEST;
1823
1824 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1825 if (!is_kernel_mmap && !machine__is_host(machine)) {
1826 /*
1827 * If the event was recorded inside the guest and injected into
1828 * the host perf.data file, then it will match a host mmap_name,
1829 * so try that - see machine__set_mmap_name().
1830 */
1831 mmap_name = "[kernel.kallsyms]";
1832 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1833 }
1834 if (xm->name[0] == '/' ||
1835 (!is_kernel_mmap && xm->name[0] == '[')) {
1836 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1837
1838 if (map == NULL)
1839 goto out_problem;
1840
1841 map__set_end(map, map__start(map) + xm->end - xm->start);
1842
1843 if (build_id__is_defined(bid))
1844 dso__set_build_id(map__dso(map), bid);
1845
1846 map__put(map);
1847 } else if (is_kernel_mmap) {
1848 const char *symbol_name = xm->name + strlen(mmap_name);
1849 /*
1850 * Should be there already, from the build-id table in
1851 * the header.
1852 */
1853 struct dso *kernel = NULL;
1854 struct dso *dso;
1855
1856 down_read(&machine->dsos.lock);
1857
1858 list_for_each_entry(dso, &machine->dsos.head, node) {
1859
			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
1875
1876 if (!dso->kernel ||
1877 is_kernel_module(dso->long_name,
1878 PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1879 continue;
1880
1881
1882 kernel = dso__get(dso);
1883 break;
1884 }
1885
1886 up_read(&machine->dsos.lock);
1887
1888 if (kernel == NULL)
1889 kernel = machine__findnew_dso(machine, machine->mmap_name);
1890 if (kernel == NULL)
1891 goto out_problem;
1892
1893 kernel->kernel = dso_space;
1894 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1895 dso__put(kernel);
1896 goto out_problem;
1897 }
1898
1899 if (strstr(kernel->long_name, "vmlinux"))
1900 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1901
1902 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1903 dso__put(kernel);
1904 goto out_problem;
1905 }
1906
1907 if (build_id__is_defined(bid))
1908 dso__set_build_id(kernel, bid);
1909
1910 /*
1911 * Avoid using a zero address (kptr_restrict) for the ref reloc
1912 * symbol. Effectively having zero here means that at record
1913 * time /proc/sys/kernel/kptr_restrict was non zero.
1914 */
1915 if (xm->pgoff != 0) {
1916 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1917 symbol_name,
1918 xm->pgoff);
1919 }
1920
1921 if (machine__is_default_guest(machine)) {
1922 /*
1923 * preload dso of guest kernel and modules
1924 */
1925 dso__load(kernel, machine__kernel_map(machine));
1926 }
1927 dso__put(kernel);
1928 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1929 return machine__process_extra_kernel_map(machine, xm);
1930 }
1931 return 0;
1932out_problem:
1933 return -1;
1934}
1935
1936int machine__process_mmap2_event(struct machine *machine,
1937 union perf_event *event,
1938 struct perf_sample *sample)
1939{
1940 struct thread *thread;
1941 struct map *map;
1942 struct dso_id dso_id = {
1943 .maj = event->mmap2.maj,
1944 .min = event->mmap2.min,
1945 .ino = event->mmap2.ino,
1946 .ino_generation = event->mmap2.ino_generation,
1947 };
1948 struct build_id __bid, *bid = NULL;
1949 int ret = 0;
1950
1951 if (dump_trace)
1952 perf_event__fprintf_mmap2(event, stdout);
1953
1954 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1955 bid = &__bid;
1956 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1957 }
1958
1959 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1960 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1961 struct extra_kernel_map xm = {
1962 .start = event->mmap2.start,
1963 .end = event->mmap2.start + event->mmap2.len,
1964 .pgoff = event->mmap2.pgoff,
1965 };
1966
1967 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1968 ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1969 if (ret < 0)
1970 goto out_problem;
1971 return 0;
1972 }
1973
1974 thread = machine__findnew_thread(machine, event->mmap2.pid,
1975 event->mmap2.tid);
1976 if (thread == NULL)
1977 goto out_problem;
1978
1979 map = map__new(machine, event->mmap2.start,
1980 event->mmap2.len, event->mmap2.pgoff,
1981 &dso_id, event->mmap2.prot,
1982 event->mmap2.flags, bid,
1983 event->mmap2.filename, thread);
1984
1985 if (map == NULL)
1986 goto out_problem_map;
1987
1988 ret = thread__insert_map(thread, map);
1989 if (ret)
1990 goto out_problem_insert;
1991
1992 thread__put(thread);
1993 map__put(map);
1994 return 0;
1995
1996out_problem_insert:
1997 map__put(map);
1998out_problem_map:
1999 thread__put(thread);
2000out_problem:
2001 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
2002 return 0;
2003}
2004
2005int machine__process_mmap_event(struct machine *machine, union perf_event *event,
2006 struct perf_sample *sample)
2007{
2008 struct thread *thread;
2009 struct map *map;
2010 u32 prot = 0;
2011 int ret = 0;
2012
2013 if (dump_trace)
2014 perf_event__fprintf_mmap(event, stdout);
2015
2016 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
2017 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
2018 struct extra_kernel_map xm = {
2019 .start = event->mmap.start,
2020 .end = event->mmap.start + event->mmap.len,
2021 .pgoff = event->mmap.pgoff,
2022 };
2023
2024 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
2025 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
2026 if (ret < 0)
2027 goto out_problem;
2028 return 0;
2029 }
2030
2031 thread = machine__findnew_thread(machine, event->mmap.pid,
2032 event->mmap.tid);
2033 if (thread == NULL)
2034 goto out_problem;
2035
2036 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
2037 prot = PROT_EXEC;
2038
2039 map = map__new(machine, event->mmap.start,
2040 event->mmap.len, event->mmap.pgoff,
2041 NULL, prot, 0, NULL, event->mmap.filename, thread);
2042
2043 if (map == NULL)
2044 goto out_problem_map;
2045
2046 ret = thread__insert_map(thread, map);
2047 if (ret)
2048 goto out_problem_insert;
2049
2050 thread__put(thread);
2051 map__put(map);
2052 return 0;
2053
2054out_problem_insert:
2055 map__put(map);
2056out_problem_map:
2057 thread__put(thread);
2058out_problem:
2059 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
2060 return 0;
2061}
2062
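/*
 * Unlink a thread from its rb tree bucket and drop the tree's reference to
 * it. 'lock' is false when the caller already holds threads->lock for
 * writing (see machine__delete_threads()).
 */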
2063static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
2064 struct thread *th, bool lock)
2065{
2066 struct threads *threads = machine__threads(machine, thread__tid(th));
2067
2068 if (!nd)
2069 nd = thread_rb_node__find(th, &threads->entries.rb_root);
2070
2071 if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th))
2072 threads__set_last_match(threads, NULL);
2073
2074 if (lock)
2075 down_write(&threads->lock);
2076
2077 BUG_ON(refcount_read(thread__refcnt(th)) == 0);
2078
2079 thread__put(nd->thread);
2080 rb_erase_cached(&nd->rb_node, &threads->entries);
2081 RB_CLEAR_NODE(&nd->rb_node);
2082 --threads->nr;
2083
2084 free(nd);
2085
2086 if (lock)
2087 up_write(&threads->lock);
2088}
2089
2090void machine__remove_thread(struct machine *machine, struct thread *th)
2091{
2092 return __machine__remove_thread(machine, NULL, th, true);
2093}
2094
2095int machine__process_fork_event(struct machine *machine, union perf_event *event,
2096 struct perf_sample *sample)
2097{
2098 struct thread *thread = machine__find_thread(machine,
2099 event->fork.pid,
2100 event->fork.tid);
2101 struct thread *parent = machine__findnew_thread(machine,
2102 event->fork.ppid,
2103 event->fork.ptid);
2104 bool do_maps_clone = true;
2105 int err = 0;
2106
2107 if (dump_trace)
2108 perf_event__fprintf_task(event, stdout);
2109
2110 /*
2111 * There may be an existing thread that is not actually the parent,
2112 * either because we are processing events out of order, or because the
2113 * (fork) event that would have removed the thread was lost. Assume the
2114 * latter case and continue on as best we can.
2115 */
2116 if (thread__pid(parent) != (pid_t)event->fork.ppid) {
2117 dump_printf("removing erroneous parent thread %d/%d\n",
2118 thread__pid(parent), thread__tid(parent));
2119 machine__remove_thread(machine, parent);
2120 thread__put(parent);
2121 parent = machine__findnew_thread(machine, event->fork.ppid,
2122 event->fork.ptid);
2123 }
2124
2125 /* if a thread currently exists for the thread id remove it */
2126 if (thread != NULL) {
2127 machine__remove_thread(machine, thread);
2128 thread__put(thread);
2129 }
2130
2131 thread = machine__findnew_thread(machine, event->fork.pid,
2132 event->fork.tid);
2133 /*
2134 * When synthesizing FORK events, we are trying to create thread
2135 * objects for the already running tasks on the machine.
2136 *
2137 * Normally, for a kernel FORK event, we want to clone the parent's
2138 * maps because that is what the kernel just did.
2139 *
2140 * But when synthesizing, this should not be done. If we do, we end up
2141 * with overlapping maps as we process the synthesized MMAP2 events that
2142 * get delivered shortly thereafter.
2143 *
2144 * Use the FORK event misc flags in an internal way to signal this
2145 * situation, so we can elide the map clone when appropriate.
2146 */
2147 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
2148 do_maps_clone = false;
2149
2150 if (thread == NULL || parent == NULL ||
2151 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2152 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
2153 err = -1;
2154 }
2155 thread__put(thread);
2156 thread__put(parent);
2157
2158 return err;
2159}
2160
2161int machine__process_exit_event(struct machine *machine, union perf_event *event,
2162 struct perf_sample *sample __maybe_unused)
2163{
2164 struct thread *thread = machine__find_thread(machine,
2165 event->fork.pid,
2166 event->fork.tid);
2167
2168 if (dump_trace)
2169 perf_event__fprintf_task(event, stdout);
2170
2171 if (thread != NULL) {
2172 if (symbol_conf.keep_exited_threads)
2173 thread__set_exited(thread, /*exited=*/true);
2174 else
2175 machine__remove_thread(machine, thread);
2176 }
2177 thread__put(thread);
2178 return 0;
2179}
2180
2181int machine__process_event(struct machine *machine, union perf_event *event,
2182 struct perf_sample *sample)
2183{
2184 int ret;
2185
2186 switch (event->header.type) {
2187 case PERF_RECORD_COMM:
2188 ret = machine__process_comm_event(machine, event, sample); break;
2189 case PERF_RECORD_MMAP:
2190 ret = machine__process_mmap_event(machine, event, sample); break;
2191 case PERF_RECORD_NAMESPACES:
2192 ret = machine__process_namespaces_event(machine, event, sample); break;
2193 case PERF_RECORD_CGROUP:
2194 ret = machine__process_cgroup_event(machine, event, sample); break;
2195 case PERF_RECORD_MMAP2:
2196 ret = machine__process_mmap2_event(machine, event, sample); break;
2197 case PERF_RECORD_FORK:
2198 ret = machine__process_fork_event(machine, event, sample); break;
2199 case PERF_RECORD_EXIT:
2200 ret = machine__process_exit_event(machine, event, sample); break;
2201 case PERF_RECORD_LOST:
2202 ret = machine__process_lost_event(machine, event, sample); break;
2203 case PERF_RECORD_AUX:
2204 ret = machine__process_aux_event(machine, event); break;
2205 case PERF_RECORD_ITRACE_START:
2206 ret = machine__process_itrace_start_event(machine, event); break;
2207 case PERF_RECORD_LOST_SAMPLES:
2208 ret = machine__process_lost_samples_event(machine, event, sample); break;
2209 case PERF_RECORD_SWITCH:
2210 case PERF_RECORD_SWITCH_CPU_WIDE:
2211 ret = machine__process_switch_event(machine, event); break;
2212 case PERF_RECORD_KSYMBOL:
2213 ret = machine__process_ksymbol(machine, event, sample); break;
2214 case PERF_RECORD_BPF_EVENT:
2215 ret = machine__process_bpf(machine, event, sample); break;
2216 case PERF_RECORD_TEXT_POKE:
2217 ret = machine__process_text_poke(machine, event, sample); break;
2218 case PERF_RECORD_AUX_OUTPUT_HW_ID:
2219 ret = machine__process_aux_output_hw_id_event(machine, event); break;
2220 default:
2221 ret = -1;
2222 break;
2223 }
2224
2225 return ret;
2226}
2227
2228static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2229{
2230 return regexec(regex, sym->name, 0, NULL, 0) == 0;
2231}
2232
2233static void ip__resolve_ams(struct thread *thread,
2234 struct addr_map_symbol *ams,
2235 u64 ip)
2236{
2237 struct addr_location al;
2238
2239 addr_location__init(&al);
2240 /*
2241 * We cannot use the header.misc hint to determine whether a
2242 * branch stack address is user, kernel, guest, hypervisor.
2243 * Branches may straddle the kernel/user/hypervisor boundaries.
2244	 * Thus, we have to try each one consecutively until we find a match,
2245	 * or else the symbol is unknown.
2246 */
2247 thread__find_cpumode_addr_location(thread, ip, &al);
2248
2249 ams->addr = ip;
2250 ams->al_addr = al.addr;
2251 ams->al_level = al.level;
2252 ams->ms.maps = maps__get(al.maps);
2253 ams->ms.sym = al.sym;
2254 ams->ms.map = map__get(al.map);
2255 ams->phys_addr = 0;
2256 ams->data_page_size = 0;
2257 addr_location__exit(&al);
2258}
2259
2260static void ip__resolve_data(struct thread *thread,
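/*
 * Resolve a data access address for the given cpumode and fill @ams,
 * carrying the physical address and data page size straight from the
 * sample.
 */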
2261 u8 m, struct addr_map_symbol *ams,
2262 u64 addr, u64 phys_addr, u64 daddr_page_size)
2263{
2264 struct addr_location al;
2265
2266 addr_location__init(&al);
2267
2268 thread__find_symbol(thread, m, addr, &al);
2269
2270 ams->addr = addr;
2271 ams->al_addr = al.addr;
2272 ams->al_level = al.level;
2273 ams->ms.maps = maps__get(al.maps);
2274 ams->ms.sym = al.sym;
2275 ams->ms.map = map__get(al.map);
2276 ams->phys_addr = phys_addr;
2277 ams->data_page_size = daddr_page_size;
2278 addr_location__exit(&al);
2279}
2280
2281struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2282 struct addr_location *al)
2283{
2284 struct mem_info *mi = mem_info__new();
2285
2286 if (!mi)
2287 return NULL;
2288
2289 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2290 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2291 sample->addr, sample->phys_addr,
2292 sample->data_page_size);
2293 mi->data_src.val = sample->data_src;
2294
2295 return mi;
2296}
2297
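/*
 * Look up the source line for @ip in the DSO backing @ms->map, caching
 * results in the DSO's srclines tree.  Returns NULL when no map is
 * available or when callchains are keyed by function only (CCKEY_FUNCTION).
 */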
2298static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2299{
2300 struct map *map = ms->map;
2301 char *srcline = NULL;
2302 struct dso *dso;
2303
2304 if (!map || callchain_param.key == CCKEY_FUNCTION)
2305 return srcline;
2306
2307 dso = map__dso(map);
2308 srcline = srcline__tree_find(&dso->srclines, ip);
2309 if (!srcline) {
2310 bool show_sym = false;
2311 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2312
2313 srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2314 ms->sym, show_sym, show_addr, ip);
2315 srcline__tree_insert(&dso->srclines, ip, srcline);
2316 }
2317
2318 return srcline;
2319}
2320
2321struct iterations {
2322 int nr_loop_iter;
2323 u64 cycles;
2324};
2325
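/*
 * Resolve one callchain entry to a map/symbol and append it to @cursor.
 * PERF_CONTEXT_* markers only update *cpumode and produce no entry; an
 * unknown context resets the cursor and discards the rest of the chain.
 * Returns 0 on success, 1 when the chain was discarded, or a negative
 * error from callchain_cursor_append().
 */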
2326static int add_callchain_ip(struct thread *thread,
2327 struct callchain_cursor *cursor,
2328 struct symbol **parent,
2329 struct addr_location *root_al,
2330 u8 *cpumode,
2331 u64 ip,
2332 bool branch,
2333 struct branch_flags *flags,
2334 struct iterations *iter,
2335 u64 branch_from)
2336{
2337 struct map_symbol ms = {};
2338 struct addr_location al;
2339 int nr_loop_iter = 0, err = 0;
2340 u64 iter_cycles = 0;
2341 const char *srcline = NULL;
2342
2343 addr_location__init(&al);
2344 al.filtered = 0;
2345 al.sym = NULL;
2346 al.srcline = NULL;
2347 if (!cpumode) {
2348 thread__find_cpumode_addr_location(thread, ip, &al);
2349 } else {
2350 if (ip >= PERF_CONTEXT_MAX) {
2351 switch (ip) {
2352 case PERF_CONTEXT_HV:
2353 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2354 break;
2355 case PERF_CONTEXT_KERNEL:
2356 *cpumode = PERF_RECORD_MISC_KERNEL;
2357 break;
2358 case PERF_CONTEXT_USER:
2359 *cpumode = PERF_RECORD_MISC_USER;
2360 break;
2361 default:
2362 pr_debug("invalid callchain context: "
2363 "%"PRId64"\n", (s64) ip);
2364 /*
2365 * It seems the callchain is corrupted.
2366 * Discard all.
2367 */
2368 callchain_cursor_reset(cursor);
2369 err = 1;
2370 goto out;
2371 }
2372 goto out;
2373 }
2374 thread__find_symbol(thread, *cpumode, ip, &al);
2375 }
2376
2377 if (al.sym != NULL) {
2378 if (perf_hpp_list.parent && !*parent &&
2379 symbol__match_regex(al.sym, &parent_regex))
2380 *parent = al.sym;
2381 else if (have_ignore_callees && root_al &&
2382 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2383 /* Treat this symbol as the root,
2384 forgetting its callees. */
2385 addr_location__copy(root_al, &al);
2386 callchain_cursor_reset(cursor);
2387 }
2388 }
2389
2390 if (symbol_conf.hide_unresolved && al.sym == NULL)
2391 goto out;
2392
2393 if (iter) {
2394 nr_loop_iter = iter->nr_loop_iter;
2395 iter_cycles = iter->cycles;
2396 }
2397
2398 ms.maps = maps__get(al.maps);
2399 ms.map = map__get(al.map);
2400 ms.sym = al.sym;
2401 srcline = callchain_srcline(&ms, al.addr);
2402 err = callchain_cursor_append(cursor, ip, &ms,
2403 branch, flags, nr_loop_iter,
2404 iter_cycles, branch_from, srcline);
2405out:
2406 addr_location__exit(&al);
2407 map_symbol__exit(&ms);
2408 return err;
2409}
2410
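/*
 * Resolve every branch stack entry of @sample into from/to map+symbol
 * pairs.  The returned array has bs->nr elements and is owned by the
 * caller.
 */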
2411struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2412 struct addr_location *al)
2413{
2414 unsigned int i;
2415 const struct branch_stack *bs = sample->branch_stack;
2416 struct branch_entry *entries = perf_sample__branch_entries(sample);
2417 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2418
2419 if (!bi)
2420 return NULL;
2421
2422 for (i = 0; i < bs->nr; i++) {
2423 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2424 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2425 bi[i].flags = entries[i].flags;
2426 }
2427 return bi;
2428}
2429
2430static void save_iterations(struct iterations *iter,
2431 struct branch_entry *be, int nr)
2432{
2433 int i;
2434
2435 iter->nr_loop_iter++;
2436 iter->cycles = 0;
2437
2438 for (i = 0; i < nr; i++)
2439 iter->cycles += be[i].flags.cycles;
2440}
2441
2442#define CHASHSZ 127
2443#define CHASHBITS 7
2444#define NO_ENTRY 0xff
2445
2446#define PERF_MAX_BRANCH_DEPTH 127
2447
2448/* Remove loops. */
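/*
 * Hash each branch source address and look for a consecutive repetition
 * of the same branch sequence.  When one is found, the duplicate entries
 * are removed and the loop iteration count and its cycles are recorded
 * against the following entry, so e.g. the unrolled branch records of a
 * tight loop collapse into a single pass with nr_loop_iter > 0.
 */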
2449static int remove_loops(struct branch_entry *l, int nr,
2450 struct iterations *iter)
2451{
2452 int i, j, off;
2453 unsigned char chash[CHASHSZ];
2454
2455 memset(chash, NO_ENTRY, sizeof(chash));
2456
2457 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2458
2459 for (i = 0; i < nr; i++) {
2460 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2461
2462 /* no collision handling for now */
2463 if (chash[h] == NO_ENTRY) {
2464 chash[h] = i;
2465 } else if (l[chash[h]].from == l[i].from) {
2466 bool is_loop = true;
2467 /* check if it is a real loop */
2468 off = 0;
2469 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2470 if (l[j].from != l[i + off].from) {
2471 is_loop = false;
2472 break;
2473 }
2474 if (is_loop) {
2475 j = nr - (i + off);
2476 if (j > 0) {
2477 save_iterations(iter + i + off,
2478 l + i, off);
2479
2480 memmove(iter + i, iter + i + off,
2481 j * sizeof(*iter));
2482
2483 memmove(l + i, l + i + off,
2484 j * sizeof(*l));
2485 }
2486
2487 nr -= off;
2488 }
2489 }
2490 }
2491 return nr;
2492}
2493
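/*
 * Add the kernel part of sample->callchain (indices 0 through @end) to
 * the cursor: walk forward for callee order, backward for caller order.
 */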
2494static int lbr_callchain_add_kernel_ip(struct thread *thread,
2495 struct callchain_cursor *cursor,
2496 struct perf_sample *sample,
2497 struct symbol **parent,
2498 struct addr_location *root_al,
2499 u64 branch_from,
2500 bool callee, int end)
2501{
2502 struct ip_callchain *chain = sample->callchain;
2503 u8 cpumode = PERF_RECORD_MISC_USER;
2504 int err, i;
2505
2506 if (callee) {
2507 for (i = 0; i < end + 1; i++) {
2508 err = add_callchain_ip(thread, cursor, parent,
2509 root_al, &cpumode, chain->ips[i],
2510 false, NULL, NULL, branch_from);
2511 if (err)
2512 return err;
2513 }
2514 return 0;
2515 }
2516
2517 for (i = end; i >= 0; i--) {
2518 err = add_callchain_ip(thread, cursor, parent,
2519 root_al, &cpumode, chain->ips[i],
2520 false, NULL, NULL, branch_from);
2521 if (err)
2522 return err;
2523 }
2524
2525 return 0;
2526}
2527
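/*
 * Remember the cursor node that was just appended for LBR entry @idx in
 * prev_lbr_cursor[], so that a later sample can reuse it when its LBR
 * stack is stitched to this one (see has_stitched_lbr()).
 */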
2528static void save_lbr_cursor_node(struct thread *thread,
2529 struct callchain_cursor *cursor,
2530 int idx)
2531{
2532 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2533
2534 if (!lbr_stitch)
2535 return;
2536
2537 if (cursor->pos == cursor->nr) {
2538 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2539 return;
2540 }
2541
2542 if (!cursor->curr)
2543 cursor->curr = cursor->first;
2544 else
2545 cursor->curr = cursor->curr->next;
2546 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2547 sizeof(struct callchain_cursor_node));
2548
2549 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2550 cursor->pos++;
2551}
2552
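/*
 * Append the LBR entries themselves: entry 0's 'to' is the sampled
 * instruction and the 'from' addresses are its callers.  The walk order
 * depends on callee vs caller ordering, and the nodes built from the
 * 'from' addresses are saved via save_lbr_cursor_node() for possible
 * stitching.
 */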
2553static int lbr_callchain_add_lbr_ip(struct thread *thread,
2554 struct callchain_cursor *cursor,
2555 struct perf_sample *sample,
2556 struct symbol **parent,
2557 struct addr_location *root_al,
2558 u64 *branch_from,
2559 bool callee)
2560{
2561 struct branch_stack *lbr_stack = sample->branch_stack;
2562 struct branch_entry *entries = perf_sample__branch_entries(sample);
2563 u8 cpumode = PERF_RECORD_MISC_USER;
2564 int lbr_nr = lbr_stack->nr;
2565 struct branch_flags *flags;
2566 int err, i;
2567 u64 ip;
2568
2569 /*
2570	 * curr and pos are not used while the cursor is being written; they are
2571	 * cleared in callchain_cursor_commit() when the writing session is closed.
2572	 * Use them here to track the current cursor node.
2573 */
2574 if (thread__lbr_stitch(thread)) {
2575 cursor->curr = NULL;
2576 cursor->pos = cursor->nr;
2577 if (cursor->nr) {
2578 cursor->curr = cursor->first;
2579 for (i = 0; i < (int)(cursor->nr - 1); i++)
2580 cursor->curr = cursor->curr->next;
2581 }
2582 }
2583
2584 if (callee) {
2585 /* Add LBR ip from first entries.to */
2586 ip = entries[0].to;
2587 flags = &entries[0].flags;
2588 *branch_from = entries[0].from;
2589 err = add_callchain_ip(thread, cursor, parent,
2590 root_al, &cpumode, ip,
2591 true, flags, NULL,
2592 *branch_from);
2593 if (err)
2594 return err;
2595
2596 /*
2597		 * The number of cursor nodes has increased, so advance the
2598		 * current cursor node.  The node for entry 0 does not need to
2599		 * be saved, because the whole LBR stack of the previous sample
2600		 * can never be stitched to it.
2601 */
2602 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2603 if (!cursor->curr)
2604 cursor->curr = cursor->first;
2605 else
2606 cursor->curr = cursor->curr->next;
2607 cursor->pos++;
2608 }
2609
2610 /* Add LBR ip from entries.from one by one. */
2611 for (i = 0; i < lbr_nr; i++) {
2612 ip = entries[i].from;
2613 flags = &entries[i].flags;
2614 err = add_callchain_ip(thread, cursor, parent,
2615 root_al, &cpumode, ip,
2616 true, flags, NULL,
2617 *branch_from);
2618 if (err)
2619 return err;
2620 save_lbr_cursor_node(thread, cursor, i);
2621 }
2622 return 0;
2623 }
2624
2625 /* Add LBR ip from entries.from one by one. */
2626 for (i = lbr_nr - 1; i >= 0; i--) {
2627 ip = entries[i].from;
2628 flags = &entries[i].flags;
2629 err = add_callchain_ip(thread, cursor, parent,
2630 root_al, &cpumode, ip,
2631 true, flags, NULL,
2632 *branch_from);
2633 if (err)
2634 return err;
2635 save_lbr_cursor_node(thread, cursor, i);
2636 }
2637
2638 if (lbr_nr > 0) {
2639 /* Add LBR ip from first entries.to */
2640 ip = entries[0].to;
2641 flags = &entries[0].flags;
2642 *branch_from = entries[0].from;
2643 err = add_callchain_ip(thread, cursor, parent,
2644 root_al, &cpumode, ip,
2645 true, flags, NULL,
2646 *branch_from);
2647 if (err)
2648 return err;
2649 }
2650
2651 return 0;
2652}
2653
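/*
 * Append the cursor nodes queued up by has_stitched_lbr(), extending the
 * callchain with the part of the previous sample's LBR stack that this
 * sample no longer covers.
 */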
2654static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2655 struct callchain_cursor *cursor)
2656{
2657 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2658 struct callchain_cursor_node *cnode;
2659 struct stitch_list *stitch_node;
2660 int err;
2661
2662 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2663 cnode = &stitch_node->cursor;
2664
2665 err = callchain_cursor_append(cursor, cnode->ip,
2666 &cnode->ms,
2667 cnode->branch,
2668 &cnode->branch_flags,
2669 cnode->nr_loop_iter,
2670 cnode->iter_cycles,
2671 cnode->branch_from,
2672 cnode->srcline);
2673 if (err)
2674 return err;
2675 }
2676 return 0;
2677}
2678
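/* Reuse a stitch node from the free list when possible, else allocate one. */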
2679static struct stitch_list *get_stitch_node(struct thread *thread)
2680{
2681 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2682 struct stitch_list *stitch_node;
2683
2684 if (!list_empty(&lbr_stitch->free_lists)) {
2685 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2686 struct stitch_list, node);
2687 list_del(&stitch_node->node);
2688
2689 return stitch_node;
2690 }
2691
2692 return malloc(sizeof(struct stitch_list));
2693}
2694
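/*
 * Use the LBR hardware index to line up the physical registers of the
 * current and previous samples.  If the bottom of the current stack
 * matches the previous one, queue the previous sample's extra entries on
 * lbr_stitch->lists for stitching and return true.
 */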
2695static bool has_stitched_lbr(struct thread *thread,
2696 struct perf_sample *cur,
2697 struct perf_sample *prev,
2698 unsigned int max_lbr,
2699 bool callee)
2700{
2701 struct branch_stack *cur_stack = cur->branch_stack;
2702 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2703 struct branch_stack *prev_stack = prev->branch_stack;
2704 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2705 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2706 int i, j, nr_identical_branches = 0;
2707 struct stitch_list *stitch_node;
2708 u64 cur_base, distance;
2709
2710 if (!cur_stack || !prev_stack)
2711 return false;
2712
2713 /* Find the physical index of the base-of-stack for current sample. */
2714 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2715
2716 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2717 (max_lbr + prev_stack->hw_idx - cur_base);
2718 /* Previous sample has shorter stack. Nothing can be stitched. */
2719 if (distance + 1 > prev_stack->nr)
2720 return false;
2721
2722 /*
2723 * Check if there are identical LBRs between two samples.
2724	 * Identical LBRs must have the same from, to and flags values. Also,
2725	 * they have to be saved in the same LBR registers (same physical
2726	 * index).
2727	 *
2728	 * Start from the base-of-stack of the current sample.
2729 */
2730 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2731 if ((prev_entries[i].from != cur_entries[j].from) ||
2732 (prev_entries[i].to != cur_entries[j].to) ||
2733 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2734 break;
2735 nr_identical_branches++;
2736 }
2737
2738 if (!nr_identical_branches)
2739 return false;
2740
2741 /*
2742 * Save the LBRs between the base-of-stack of previous sample
2743 * and the base-of-stack of current sample into lbr_stitch->lists.
2744 * These LBRs will be stitched later.
2745 */
2746 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2747
2748 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2749 continue;
2750
2751 stitch_node = get_stitch_node(thread);
2752 if (!stitch_node)
2753 return false;
2754
2755 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2756 sizeof(struct callchain_cursor_node));
2757
2758 if (callee)
2759 list_add(&stitch_node->node, &lbr_stitch->lists);
2760 else
2761 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2762 }
2763
2764 return true;
2765}
2766
2767static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2768{
2769 if (thread__lbr_stitch(thread))
2770 return true;
2771
2772 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2773 if (!thread__lbr_stitch(thread))
2774 goto err;
2775
2776 thread__lbr_stitch(thread)->prev_lbr_cursor =
2777 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2778 if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2779 goto free_lbr_stitch;
2780
2781 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2782 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2783
2784 return true;
2785
2786free_lbr_stitch:
2787 free(thread__lbr_stitch(thread));
2788 thread__set_lbr_stitch(thread, NULL);
2789err:
2790 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2791 thread__set_lbr_stitch_enable(thread, false);
2792 return false;
2793}
2794
2795/*
2796 * Resolve an LBR call-stack chain sample.
2797 * Return:
2798 * 1 on success, LBR callchain information was resolved
2799 * 0 when no LBR callchain information is available; the caller should try fp
2800 * negative error code on other errors.
2801 */
2802static int resolve_lbr_callchain_sample(struct thread *thread,
2803 struct callchain_cursor *cursor,
2804 struct perf_sample *sample,
2805 struct symbol **parent,
2806 struct addr_location *root_al,
2807 int max_stack,
2808 unsigned int max_lbr)
2809{
2810 bool callee = (callchain_param.order == ORDER_CALLEE);
2811 struct ip_callchain *chain = sample->callchain;
2812 int chain_nr = min(max_stack, (int)chain->nr), i;
2813 struct lbr_stitch *lbr_stitch;
2814 bool stitched_lbr = false;
2815 u64 branch_from = 0;
2816 int err;
2817
2818 for (i = 0; i < chain_nr; i++) {
2819 if (chain->ips[i] == PERF_CONTEXT_USER)
2820 break;
2821 }
2822
2823 /* LBR only affects the user callchain */
2824 if (i == chain_nr)
2825 return 0;
2826
2827 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2828 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2829 lbr_stitch = thread__lbr_stitch(thread);
2830
2831 stitched_lbr = has_stitched_lbr(thread, sample,
2832 &lbr_stitch->prev_sample,
2833 max_lbr, callee);
2834
2835 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2836 list_replace_init(&lbr_stitch->lists,
2837 &lbr_stitch->free_lists);
2838 }
2839 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2840 }
2841
2842 if (callee) {
2843 /* Add kernel ip */
2844 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2845 parent, root_al, branch_from,
2846 true, i);
2847 if (err)
2848 goto error;
2849
2850 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2851 root_al, &branch_from, true);
2852 if (err)
2853 goto error;
2854
2855 if (stitched_lbr) {
2856 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2857 if (err)
2858 goto error;
2859 }
2860
2861 } else {
2862 if (stitched_lbr) {
2863 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2864 if (err)
2865 goto error;
2866 }
2867 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2868 root_al, &branch_from, false);
2869 if (err)
2870 goto error;
2871
2872 /* Add kernel ip */
2873 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2874 parent, root_al, branch_from,
2875 false, i);
2876 if (err)
2877 goto error;
2878 }
2879 return 1;
2880
2881error:
2882 return (err < 0) ? err : 0;
2883}
2884
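/*
 * Scan backwards from index @ent for the closest PERF_CONTEXT_* marker
 * and feed it to add_callchain_ip() so that *cpumode is updated for the
 * entries that follow.
 */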
2885static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2886 struct callchain_cursor *cursor,
2887 struct symbol **parent,
2888 struct addr_location *root_al,
2889 u8 *cpumode, int ent)
2890{
2891 int err = 0;
2892
2893 while (--ent >= 0) {
2894 u64 ip = chain->ips[ent];
2895
2896 if (ip >= PERF_CONTEXT_MAX) {
2897 err = add_callchain_ip(thread, cursor, parent,
2898 root_al, cpumode, ip,
2899 false, NULL, NULL, 0);
2900 break;
2901 }
2902 }
2903 return err;
2904}
2905
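/*
 * Only arm64 provides a way to recover the caller of the leaf frame
 * (see arm64-frame-pointer-unwind-support.h); other architectures
 * simply return 0.
 */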
2906static u64 get_leaf_frame_caller(struct perf_sample *sample,
2907 struct thread *thread, int usr_idx)
2908{
2909 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2910 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2911 else
2912 return 0;
2913}
2914
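/*
 * Build the callchain cursor for @sample: try the LBR call stack first
 * when the event has one, optionally fold the raw branch stack in for
 * extra context, then walk the kernel-supplied callchain, adding the
 * arm64 leaf frame caller where it can be recovered.
 */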
2915static int thread__resolve_callchain_sample(struct thread *thread,
2916 struct callchain_cursor *cursor,
2917 struct evsel *evsel,
2918 struct perf_sample *sample,
2919 struct symbol **parent,
2920 struct addr_location *root_al,
2921 int max_stack)
2922{
2923 struct branch_stack *branch = sample->branch_stack;
2924 struct branch_entry *entries = perf_sample__branch_entries(sample);
2925 struct ip_callchain *chain = sample->callchain;
2926 int chain_nr = 0;
2927 u8 cpumode = PERF_RECORD_MISC_USER;
2928 int i, j, err, nr_entries, usr_idx;
2929 int skip_idx = -1;
2930 int first_call = 0;
2931 u64 leaf_frame_caller;
2932
2933 if (chain)
2934 chain_nr = chain->nr;
2935
2936 if (evsel__has_branch_callstack(evsel)) {
2937 struct perf_env *env = evsel__env(evsel);
2938
2939 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2940 root_al, max_stack,
2941 !env ? 0 : env->max_branches);
2942 if (err)
2943 return (err < 0) ? err : 0;
2944 }
2945
2946 /*
2947 * Based on DWARF debug information, some architectures skip
2948 * a callchain entry saved by the kernel.
2949 */
2950 skip_idx = arch_skip_callchain_idx(thread, chain);
2951
2952 /*
2953 * Add branches to call stack for easier browsing. This gives
2954 * more context for a sample than just the callers.
2955 *
2956 * This uses individual histograms of paths compared to the
2957 * aggregated histograms the normal LBR mode uses.
2958 *
2959 * Limitations for now:
2960 * - No extra filters
2961 * - No annotations (should annotate somehow)
2962 */
2963
2964 if (branch && callchain_param.branch_callstack) {
2965 int nr = min(max_stack, (int)branch->nr);
2966 struct branch_entry be[nr];
2967 struct iterations iter[nr];
2968
2969 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2970 pr_warning("corrupted branch chain. skipping...\n");
2971 goto check_calls;
2972 }
2973
2974 for (i = 0; i < nr; i++) {
2975 if (callchain_param.order == ORDER_CALLEE) {
2976 be[i] = entries[i];
2977
2978 if (chain == NULL)
2979 continue;
2980
2981 /*
2982 * Check for overlap into the callchain.
2983 * The return address is one off compared to
2984 * the branch entry. To adjust for this
2985 * assume the calling instruction is not longer
2986 * than 8 bytes.
2987 */
2988 if (i == skip_idx ||
2989 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2990 first_call++;
2991 else if (be[i].from < chain->ips[first_call] &&
2992 be[i].from >= chain->ips[first_call] - 8)
2993 first_call++;
2994 } else
2995 be[i] = entries[branch->nr - i - 1];
2996 }
2997
2998 memset(iter, 0, sizeof(struct iterations) * nr);
2999 nr = remove_loops(be, nr, iter);
3000
3001 for (i = 0; i < nr; i++) {
3002 err = add_callchain_ip(thread, cursor, parent,
3003 root_al,
3004 NULL, be[i].to,
3005 true, &be[i].flags,
3006 NULL, be[i].from);
3007
3008 if (!err)
3009 err = add_callchain_ip(thread, cursor, parent, root_al,
3010 NULL, be[i].from,
3011 true, &be[i].flags,
3012 &iter[i], 0);
3013 if (err == -EINVAL)
3014 break;
3015 if (err)
3016 return err;
3017 }
3018
3019 if (chain_nr == 0)
3020 return 0;
3021
3022 chain_nr -= nr;
3023 }
3024
3025check_calls:
3026 if (chain && callchain_param.order != ORDER_CALLEE) {
3027 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
3028 &cpumode, chain->nr - first_call);
3029 if (err)
3030 return (err < 0) ? err : 0;
3031 }
3032 for (i = first_call, nr_entries = 0;
3033 i < chain_nr && nr_entries < max_stack; i++) {
3034 u64 ip;
3035
3036 if (callchain_param.order == ORDER_CALLEE)
3037 j = i;
3038 else
3039 j = chain->nr - i - 1;
3040
3041#ifdef HAVE_SKIP_CALLCHAIN_IDX
3042 if (j == skip_idx)
3043 continue;
3044#endif
3045 ip = chain->ips[j];
3046 if (ip < PERF_CONTEXT_MAX)
3047 ++nr_entries;
3048 else if (callchain_param.order != ORDER_CALLEE) {
3049 err = find_prev_cpumode(chain, thread, cursor, parent,
3050 root_al, &cpumode, j);
3051 if (err)
3052 return (err < 0) ? err : 0;
3053 continue;
3054 }
3055
3056 /*
3057 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
3058 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
3059 * the index will be different in order to add the missing frame
3060 * at the right place.
3061 */
3062
3063 usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
3064
3065 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
3066
3067 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
3068
3069 /*
3070			 * Check that leaf_frame_caller != ip so the same value
3071			 * is not added twice.
3072 */
3073
3074 if (leaf_frame_caller && leaf_frame_caller != ip) {
3075
3076 err = add_callchain_ip(thread, cursor, parent,
3077 root_al, &cpumode, leaf_frame_caller,
3078 false, NULL, NULL, 0);
3079 if (err)
3080 return (err < 0) ? err : 0;
3081 }
3082 }
3083
3084 err = add_callchain_ip(thread, cursor, parent,
3085 root_al, &cpumode, ip,
3086 false, NULL, NULL, 0);
3087
3088 if (err)
3089 return (err < 0) ? err : 0;
3090 }
3091
3092 return 0;
3093}
3094
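/*
 * Expand the inlined functions at @ip into individual cursor entries,
 * using the DSO's cached inlines tree.  Returns 0 when inline entries
 * were appended and the caller need not add the frame itself, non-zero
 * otherwise.
 */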
3095static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
3096{
3097 struct symbol *sym = ms->sym;
3098 struct map *map = ms->map;
3099 struct inline_node *inline_node;
3100 struct inline_list *ilist;
3101 struct dso *dso;
3102 u64 addr;
3103 int ret = 1;
3104 struct map_symbol ilist_ms;
3105
3106 if (!symbol_conf.inline_name || !map || !sym)
3107 return ret;
3108
3109 addr = map__dso_map_ip(map, ip);
3110 addr = map__rip_2objdump(map, addr);
3111 dso = map__dso(map);
3112
3113 inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
3114 if (!inline_node) {
3115 inline_node = dso__parse_addr_inlines(dso, addr, sym);
3116 if (!inline_node)
3117 return ret;
3118 inlines__tree_insert(&dso->inlined_nodes, inline_node);
3119 }
3120
3121 ilist_ms = (struct map_symbol) {
3122 .maps = maps__get(ms->maps),
3123 .map = map__get(map),
3124 };
3125 list_for_each_entry(ilist, &inline_node->val, list) {
3126 ilist_ms.sym = ilist->symbol;
3127 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
3128 NULL, 0, 0, 0, ilist->srcline);
3129
3130		if (ret != 0)
3131			break;	/* release the ilist_ms references below even on error */
3132 }
3133 map_symbol__exit(&ilist_ms);
3134
3135 return ret;
3136}
3137
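/*
 * Callback for the DWARF unwinder: append one unwound frame to the
 * callchain cursor, expanding inlined functions and resolving the
 * srcline first.
 */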
3138static int unwind_entry(struct unwind_entry *entry, void *arg)
3139{
3140 struct callchain_cursor *cursor = arg;
3141 const char *srcline = NULL;
3142 u64 addr = entry->ip;
3143
3144 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
3145 return 0;
3146
3147 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
3148 return 0;
3149
3150 /*
3151 * Convert entry->ip from a virtual address to an offset in
3152 * its corresponding binary.
3153 */
3154 if (entry->ms.map)
3155 addr = map__dso_map_ip(entry->ms.map, entry->ip);
3156
3157 srcline = callchain_srcline(&entry->ms, addr);
3158 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
3159 false, NULL, 0, 0, 0, srcline);
3160}
3161
3162static int thread__resolve_callchain_unwind(struct thread *thread,
3163 struct callchain_cursor *cursor,
3164 struct evsel *evsel,
3165 struct perf_sample *sample,
3166 int max_stack)
3167{
3168 /* Can we do dwarf post unwind? */
3169 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
3170 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3171 return 0;
3172
3173 /* Bail out if nothing was captured. */
3174 if ((!sample->user_regs.regs) ||
3175 (!sample->user_stack.size))
3176 return 0;
3177
3178 return unwind__get_entries(unwind_entry, cursor,
3179 thread, sample, max_stack, false);
3180}
3181
3182int thread__resolve_callchain(struct thread *thread,
3183 struct callchain_cursor *cursor,
3184 struct evsel *evsel,
3185 struct perf_sample *sample,
3186 struct symbol **parent,
3187 struct addr_location *root_al,
3188 int max_stack)
3189{
3190 int ret = 0;
3191
3192 if (cursor == NULL)
3193 return -ENOMEM;
3194
3195 callchain_cursor_reset(cursor);
3196
3197 if (callchain_param.order == ORDER_CALLEE) {
3198 ret = thread__resolve_callchain_sample(thread, cursor,
3199 evsel, sample,
3200 parent, root_al,
3201 max_stack);
3202 if (ret)
3203 return ret;
3204 ret = thread__resolve_callchain_unwind(thread, cursor,
3205 evsel, sample,
3206 max_stack);
3207 } else {
3208 ret = thread__resolve_callchain_unwind(thread, cursor,
3209 evsel, sample,
3210 max_stack);
3211 if (ret)
3212 return ret;
3213 ret = thread__resolve_callchain_sample(thread, cursor,
3214 evsel, sample,
3215 parent, root_al,
3216 max_stack);
3217 }
3218
3219 return ret;
3220}
3221
3222int machine__for_each_thread(struct machine *machine,
3223 int (*fn)(struct thread *thread, void *p),
3224 void *priv)
3225{
3226 struct threads *threads;
3227 struct rb_node *nd;
3228 int rc = 0;
3229 int i;
3230
3231 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3232 threads = &machine->threads[i];
3233 for (nd = rb_first_cached(&threads->entries); nd;
3234 nd = rb_next(nd)) {
3235 struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
3236
3237 rc = fn(trb->thread, priv);
3238 if (rc != 0)
3239 return rc;
3240 }
3241 }
3242 return rc;
3243}
3244
3245int machines__for_each_thread(struct machines *machines,
3246 int (*fn)(struct thread *thread, void *p),
3247 void *priv)
3248{
3249 struct rb_node *nd;
3250 int rc = 0;
3251
3252 rc = machine__for_each_thread(&machines->host, fn, priv);
3253 if (rc != 0)
3254 return rc;
3255
3256 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3257 struct machine *machine = rb_entry(nd, struct machine, rb_node);
3258
3259 rc = machine__for_each_thread(machine, fn, priv);
3260 if (rc != 0)
3261 return rc;
3262 }
3263 return rc;
3264}
3265
3266pid_t machine__get_current_tid(struct machine *machine, int cpu)
3267{
3268 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3269 return -1;
3270
3271 return machine->current_tid[cpu];
3272}
3273
3274int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3275 pid_t tid)
3276{
3277 struct thread *thread;
3278 const pid_t init_val = -1;
3279
3280 if (cpu < 0)
3281 return -EINVAL;
3282
3283 if (realloc_array_as_needed(machine->current_tid,
3284 machine->current_tid_sz,
3285 (unsigned int)cpu,
3286 &init_val))
3287 return -ENOMEM;
3288
3289 machine->current_tid[cpu] = tid;
3290
3291 thread = machine__findnew_thread(machine, pid, tid);
3292 if (!thread)
3293 return -ENOMEM;
3294
3295 thread__set_cpu(thread, cpu);
3296 thread__put(thread);
3297
3298 return 0;
3299}
3300
3301/*
3302 * Compares the raw arch string. N.B. see instead perf_env__arch() or
3303 * machine__normalized_is() if a normalized arch is needed.
3304 */
3305bool machine__is(struct machine *machine, const char *arch)
3306{
3307 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3308}
3309
3310bool machine__normalized_is(struct machine *machine, const char *arch)
3311{
3312 return machine && !strcmp(perf_env__arch(machine->env), arch);
3313}
3314
3315int machine__nr_cpus_avail(struct machine *machine)
3316{
3317 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3318}
3319
3320int machine__get_kernel_start(struct machine *machine)
3321{
3322 struct map *map = machine__kernel_map(machine);
3323 int err = 0;
3324
3325 /*
3326 * The only addresses above 2^63 are kernel addresses of a 64-bit
3327 * kernel. Note that addresses are unsigned so that on a 32-bit system
3328 * all addresses including kernel addresses are less than 2^32. In
3329 * that case (32-bit system), if the kernel mapping is unknown, all
3330 * addresses will be assumed to be in user space - see
3331 * machine__kernel_ip().
3332 */
3333 machine->kernel_start = 1ULL << 63;
3334 if (map) {
3335 err = map__load(map);
3336 /*
3337 * On x86_64, PTI entry trampolines are less than the
3338 * start of kernel text, but still above 2^63. So leave
3339 * kernel_start = 1ULL << 63 for x86_64.
3340 */
3341 if (!err && !machine__is(machine, "x86_64"))
3342 machine->kernel_start = map__start(map);
3343 }
3344 return err;
3345}
3346
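/*
 * When the machine has a single address space, classify @addr by whether
 * it is a kernel address rather than trusting the cpumode that came with
 * the sample, preserving the host/guest distinction.
 */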
3347u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3348{
3349 u8 addr_cpumode = cpumode;
3350 bool kernel_ip;
3351
3352 if (!machine->single_address_space)
3353 goto out;
3354
3355 kernel_ip = machine__kernel_ip(machine, addr);
3356 switch (cpumode) {
3357 case PERF_RECORD_MISC_KERNEL:
3358 case PERF_RECORD_MISC_USER:
3359 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3360 PERF_RECORD_MISC_USER;
3361 break;
3362 case PERF_RECORD_MISC_GUEST_KERNEL:
3363 case PERF_RECORD_MISC_GUEST_USER:
3364 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3365 PERF_RECORD_MISC_GUEST_USER;
3366 break;
3367 default:
3368 break;
3369 }
3370out:
3371 return addr_cpumode;
3372}
3373
3374struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3375{
3376 return dsos__findnew_id(&machine->dsos, filename, id);
3377}
3378
3379struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3380{
3381 return machine__findnew_dso_id(machine, filename, NULL);
3382}
3383
3384char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3385{
3386 struct machine *machine = vmachine;
3387 struct map *map;
3388 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3389
3390 if (sym == NULL)
3391 return NULL;
3392
3393 *modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
3394 *addrp = map__unmap_ip(map, sym->start);
3395 return sym->name;
3396}
3397
3398int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3399{
3400 struct dso *pos;
3401 int err = 0;
3402
3403 list_for_each_entry(pos, &machine->dsos.head, node) {
3404 if (fn(pos, machine, priv))
3405 err = -1;
3406 }
3407 return err;
3408}
3409
3410int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3411{
3412 struct maps *maps = machine__kernel_maps(machine);
3413
3414 return maps__for_each_map(maps, fn, priv);
3415}
3416
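/*
 * Check whether @addr lies in the kernel's sched or lock text sections,
 * where mutex, rwsem and spinlock functions live.  The section bounds are
 * resolved from kernel symbols once and cached; sched.text_start == 1
 * marks a failed lookup so it is not retried.
 */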
3417bool machine__is_lock_function(struct machine *machine, u64 addr)
3418{
3419 if (!machine->sched.text_start) {
3420 struct map *kmap;
3421 struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3422
3423 if (!sym) {
3424 /* to avoid retry */
3425 machine->sched.text_start = 1;
3426 return false;
3427 }
3428
3429 machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3430
3431 /* should not fail from here */
3432 sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3433 machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3434
3435 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3436 machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3437
3438 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3439 machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3440 }
3441
3442 /* failed to get kernel symbols */
3443 if (machine->sched.text_start == 1)
3444 return false;
3445
3446 /* mutex and rwsem functions are in sched text section */
3447 if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3448 return true;
3449
3450 /* spinlock functions are in lock text section */
3451 if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3452 return true;
3453
3454 return false;
3455}
1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <inttypes.h>
5#include <regex.h>
6#include <stdlib.h>
7#include "callchain.h"
8#include "debug.h"
9#include "dso.h"
10#include "env.h"
11#include "event.h"
12#include "evsel.h"
13#include "hist.h"
14#include "machine.h"
15#include "map.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "mem-events.h"
19#include "mem-info.h"
20#include "path.h"
21#include "srcline.h"
22#include "symbol.h"
23#include "sort.h"
24#include "strlist.h"
25#include "target.h"
26#include "thread.h"
27#include "util.h"
28#include "vdso.h"
29#include <stdbool.h>
30#include <sys/types.h>
31#include <sys/stat.h>
32#include <unistd.h>
33#include "unwind.h"
34#include "linux/hash.h"
35#include "asm/bug.h"
36#include "bpf-event.h"
37#include <internal/lib.h> // page_size
38#include "cgroup.h"
39#include "arm64-frame-pointer-unwind-support.h"
40
41#include <linux/ctype.h>
42#include <symbol/kallsyms.h>
43#include <linux/mman.h>
44#include <linux/string.h>
45#include <linux/zalloc.h>
46
47static struct dso *machine__kernel_dso(struct machine *machine)
48{
49 return map__dso(machine->vmlinux_map);
50}
51
52static int machine__set_mmap_name(struct machine *machine)
53{
54 if (machine__is_host(machine))
55 machine->mmap_name = strdup("[kernel.kallsyms]");
56 else if (machine__is_default_guest(machine))
57 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
58 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
59 machine->pid) < 0)
60 machine->mmap_name = NULL;
61
62 return machine->mmap_name ? 0 : -ENOMEM;
63}
64
65static void thread__set_guest_comm(struct thread *thread, pid_t pid)
66{
67 char comm[64];
68
69 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
70 thread__set_comm(thread, comm, 0);
71}
72
73int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
74{
75 int err = -ENOMEM;
76
77 memset(machine, 0, sizeof(*machine));
78 machine->kmaps = maps__new(machine);
79 if (machine->kmaps == NULL)
80 return -ENOMEM;
81
82 RB_CLEAR_NODE(&machine->rb_node);
83 dsos__init(&machine->dsos);
84
85 threads__init(&machine->threads);
86
87 machine->vdso_info = NULL;
88 machine->env = NULL;
89
90 machine->pid = pid;
91
92 machine->id_hdr_size = 0;
93 machine->kptr_restrict_warned = false;
94 machine->comm_exec = false;
95 machine->kernel_start = 0;
96 machine->vmlinux_map = NULL;
97
98 machine->root_dir = strdup(root_dir);
99 if (machine->root_dir == NULL)
100 goto out;
101
102 if (machine__set_mmap_name(machine))
103 goto out;
104
105 if (pid != HOST_KERNEL_ID) {
106 struct thread *thread = machine__findnew_thread(machine, -1,
107 pid);
108
109 if (thread == NULL)
110 goto out;
111
112 thread__set_guest_comm(thread, pid);
113 thread__put(thread);
114 }
115
116 machine->current_tid = NULL;
117 err = 0;
118
119out:
120 if (err) {
121 zfree(&machine->kmaps);
122 zfree(&machine->root_dir);
123 zfree(&machine->mmap_name);
124 }
125 return 0;
126}
127
128struct machine *machine__new_host(void)
129{
130 struct machine *machine = malloc(sizeof(*machine));
131
132 if (machine != NULL) {
133 machine__init(machine, "", HOST_KERNEL_ID);
134
135 if (machine__create_kernel_maps(machine) < 0)
136 goto out_delete;
137
138 machine->env = &perf_env;
139 }
140
141 return machine;
142out_delete:
143 free(machine);
144 return NULL;
145}
146
147struct machine *machine__new_kallsyms(void)
148{
149 struct machine *machine = machine__new_host();
150 /*
151 * FIXME:
152 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
153 * ask for not using the kcore parsing code, once this one is fixed
154 * to create a map per module.
155 */
156 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
157 machine__delete(machine);
158 machine = NULL;
159 }
160
161 return machine;
162}
163
164void machine__delete_threads(struct machine *machine)
165{
166 threads__remove_all_threads(&machine->threads);
167}
168
169void machine__exit(struct machine *machine)
170{
171 if (machine == NULL)
172 return;
173
174 machine__destroy_kernel_maps(machine);
175 maps__zput(machine->kmaps);
176 dsos__exit(&machine->dsos);
177 machine__exit_vdso(machine);
178 zfree(&machine->root_dir);
179 zfree(&machine->mmap_name);
180 zfree(&machine->current_tid);
181 zfree(&machine->kallsyms_filename);
182
183 threads__exit(&machine->threads);
184}
185
186void machine__delete(struct machine *machine)
187{
188 if (machine) {
189 machine__exit(machine);
190 free(machine);
191 }
192}
193
194void machines__init(struct machines *machines)
195{
196 machine__init(&machines->host, "", HOST_KERNEL_ID);
197 machines->guests = RB_ROOT_CACHED;
198}
199
200void machines__exit(struct machines *machines)
201{
202 machine__exit(&machines->host);
203 /* XXX exit guest */
204}
205
206struct machine *machines__add(struct machines *machines, pid_t pid,
207 const char *root_dir)
208{
209 struct rb_node **p = &machines->guests.rb_root.rb_node;
210 struct rb_node *parent = NULL;
211 struct machine *pos, *machine = malloc(sizeof(*machine));
212 bool leftmost = true;
213
214 if (machine == NULL)
215 return NULL;
216
217 if (machine__init(machine, root_dir, pid) != 0) {
218 free(machine);
219 return NULL;
220 }
221
222 while (*p != NULL) {
223 parent = *p;
224 pos = rb_entry(parent, struct machine, rb_node);
225 if (pid < pos->pid)
226 p = &(*p)->rb_left;
227 else {
228 p = &(*p)->rb_right;
229 leftmost = false;
230 }
231 }
232
233 rb_link_node(&machine->rb_node, parent, p);
234 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
235
236 machine->machines = machines;
237
238 return machine;
239}
240
241void machines__set_comm_exec(struct machines *machines, bool comm_exec)
242{
243 struct rb_node *nd;
244
245 machines->host.comm_exec = comm_exec;
246
247 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
248 struct machine *machine = rb_entry(nd, struct machine, rb_node);
249
250 machine->comm_exec = comm_exec;
251 }
252}
253
254struct machine *machines__find(struct machines *machines, pid_t pid)
255{
256 struct rb_node **p = &machines->guests.rb_root.rb_node;
257 struct rb_node *parent = NULL;
258 struct machine *machine;
259 struct machine *default_machine = NULL;
260
261 if (pid == HOST_KERNEL_ID)
262 return &machines->host;
263
264 while (*p != NULL) {
265 parent = *p;
266 machine = rb_entry(parent, struct machine, rb_node);
267 if (pid < machine->pid)
268 p = &(*p)->rb_left;
269 else if (pid > machine->pid)
270 p = &(*p)->rb_right;
271 else
272 return machine;
273 if (!machine->pid)
274 default_machine = machine;
275 }
276
277 return default_machine;
278}
279
280struct machine *machines__findnew(struct machines *machines, pid_t pid)
281{
282 char path[PATH_MAX];
283 const char *root_dir = "";
284 struct machine *machine = machines__find(machines, pid);
285
286 if (machine && (machine->pid == pid))
287 goto out;
288
289 if ((pid != HOST_KERNEL_ID) &&
290 (pid != DEFAULT_GUEST_KERNEL_ID) &&
291 (symbol_conf.guestmount)) {
292 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
293 if (access(path, R_OK)) {
294 static struct strlist *seen;
295
296 if (!seen)
297 seen = strlist__new(NULL, NULL);
298
299 if (!strlist__has_entry(seen, path)) {
300 pr_err("Can't access file %s\n", path);
301 strlist__add(seen, path);
302 }
303 machine = NULL;
304 goto out;
305 }
306 root_dir = path;
307 }
308
309 machine = machines__add(machines, pid, root_dir);
310out:
311 return machine;
312}
313
314struct machine *machines__find_guest(struct machines *machines, pid_t pid)
315{
316 struct machine *machine = machines__find(machines, pid);
317
318 if (!machine)
319 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
320 return machine;
321}
322
323/*
324 * A common case for KVM test programs is that the test program acts as the
325 * hypervisor, creating, running and destroying the virtual machine, and
326 * providing the guest object code from its own object code. In this case,
327 * the VM is not running an OS, but only the functions loaded into it by the
328 * hypervisor test program, and conveniently, loaded at the same virtual
329 * addresses.
330 *
331 * Normally to resolve addresses, MMAP events are needed to map addresses
332 * back to the object code and debug symbols for that object code.
333 *
334 * Currently, there is no way to get such mapping information from guests
335 * but, in the scenario described above, the guest has the same mappings
336 * as the hypervisor, so support for that scenario can be achieved.
337 *
338 * To support that, copy the host thread's maps to the guest thread's maps.
339 * Note, we do not discover the guest until we encounter a guest event,
340 * which works well because it is not until then that we know that the host
341 * thread's maps have been set up.
342 *
343 * This function returns the guest thread. Apart from keeping the data
344 * structures sane, using a thread belonging to the guest machine, instead
345 * of the host thread, allows it to have its own comm (refer
346 * thread__set_guest_comm()).
347 */
348static struct thread *findnew_guest_code(struct machine *machine,
349 struct machine *host_machine,
350 pid_t pid)
351{
352 struct thread *host_thread;
353 struct thread *thread;
354 int err;
355
356 if (!machine)
357 return NULL;
358
359 thread = machine__findnew_thread(machine, -1, pid);
360 if (!thread)
361 return NULL;
362
363 /* Assume maps are set up if there are any */
364 if (!maps__empty(thread__maps(thread)))
365 return thread;
366
367 host_thread = machine__find_thread(host_machine, -1, pid);
368 if (!host_thread)
369 goto out_err;
370
371 thread__set_guest_comm(thread, pid);
372
373 /*
374 * Guest code can be found in hypervisor process at the same address
375 * so copy host maps.
376 */
377 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
378 thread__put(host_thread);
379 if (err)
380 goto out_err;
381
382 return thread;
383
384out_err:
385 thread__zput(thread);
386 return NULL;
387}
388
389struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
390{
391 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
392 struct machine *machine = machines__findnew(machines, pid);
393
394 return findnew_guest_code(machine, host_machine, pid);
395}
396
397struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
398{
399 struct machines *machines = machine->machines;
400 struct machine *host_machine;
401
402 if (!machines)
403 return NULL;
404
405 host_machine = machines__find(machines, HOST_KERNEL_ID);
406
407 return findnew_guest_code(machine, host_machine, pid);
408}
409
410void machines__process_guests(struct machines *machines,
411 machine__process_t process, void *data)
412{
413 struct rb_node *nd;
414
415 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
416 struct machine *pos = rb_entry(nd, struct machine, rb_node);
417 process(pos, data);
418 }
419}
420
421void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
422{
423 struct rb_node *node;
424 struct machine *machine;
425
426 machines->host.id_hdr_size = id_hdr_size;
427
428 for (node = rb_first_cached(&machines->guests); node;
429 node = rb_next(node)) {
430 machine = rb_entry(node, struct machine, rb_node);
431 machine->id_hdr_size = id_hdr_size;
432 }
433
434 return;
435}
436
437static void machine__update_thread_pid(struct machine *machine,
438 struct thread *th, pid_t pid)
439{
440 struct thread *leader;
441
442 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
443 return;
444
445 thread__set_pid(th, pid);
446
447 if (thread__pid(th) == thread__tid(th))
448 return;
449
450 leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
451 if (!leader)
452 goto out_err;
453
454 if (!thread__maps(leader))
455 thread__set_maps(leader, maps__new(machine));
456
457 if (!thread__maps(leader))
458 goto out_err;
459
460 if (thread__maps(th) == thread__maps(leader))
461 goto out_put;
462
463 if (thread__maps(th)) {
464 /*
465 * Maps are created from MMAP events which provide the pid and
466 * tid. Consequently there never should be any maps on a thread
467 * with an unknown pid. Just print an error if there are.
468 */
469 if (!maps__empty(thread__maps(th)))
470 pr_err("Discarding thread maps for %d:%d\n",
471 thread__pid(th), thread__tid(th));
472 maps__put(thread__maps(th));
473 }
474
475 thread__set_maps(th, maps__get(thread__maps(leader)));
476out_put:
477 thread__put(leader);
478 return;
479out_err:
480 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
481 goto out_put;
482}
483
484/*
485 * Caller must eventually drop thread->refcnt returned with a successful
486 * lookup/new thread inserted.
487 */
488static struct thread *__machine__findnew_thread(struct machine *machine,
489 pid_t pid,
490 pid_t tid,
491 bool create)
492{
493 struct thread *th = threads__find(&machine->threads, tid);
494 bool created;
495
496 if (th) {
497 machine__update_thread_pid(machine, th, pid);
498 return th;
499 }
500 if (!create)
501 return NULL;
502
503 th = threads__findnew(&machine->threads, pid, tid, &created);
504 if (created) {
505 /*
506 * We have to initialize maps separately after rb tree is
507 * updated.
508 *
509 * The reason is that we call machine__findnew_thread within
510 * thread__init_maps to find the thread leader and that would
511 * screwed the rb tree.
512 */
513 if (thread__init_maps(th, machine)) {
514 pr_err("Thread init failed thread %d\n", pid);
515 threads__remove(&machine->threads, th);
516 thread__put(th);
517 return NULL;
518 }
519 } else
520 machine__update_thread_pid(machine, th, pid);
521
522 return th;
523}
524
525struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
526{
527 return __machine__findnew_thread(machine, pid, tid, /*create=*/true);
528}
529
530struct thread *machine__find_thread(struct machine *machine, pid_t pid,
531 pid_t tid)
532{
533 return __machine__findnew_thread(machine, pid, tid, /*create=*/false);
534}
535
536/*
537 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
538 * So here a single thread is created for that, but actually there is a separate
539 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
540 * is only 1. That causes problems for some tools, requiring workarounds. For
541 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
542 */
543struct thread *machine__idle_thread(struct machine *machine)
544{
545 struct thread *thread = machine__findnew_thread(machine, 0, 0);
546
547 if (!thread || thread__set_comm(thread, "swapper", 0) ||
548 thread__set_namespaces(thread, 0, NULL))
549 pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
550
551 return thread;
552}
553
554struct comm *machine__thread_exec_comm(struct machine *machine,
555 struct thread *thread)
556{
557 if (machine->comm_exec)
558 return thread__exec_comm(thread);
559 else
560 return thread__comm(thread);
561}
562
563int machine__process_comm_event(struct machine *machine, union perf_event *event,
564 struct perf_sample *sample)
565{
566 struct thread *thread = machine__findnew_thread(machine,
567 event->comm.pid,
568 event->comm.tid);
569 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
570 int err = 0;
571
572 if (exec)
573 machine->comm_exec = true;
574
575 if (dump_trace)
576 perf_event__fprintf_comm(event, stdout);
577
578 if (thread == NULL ||
579 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
580 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
581 err = -1;
582 }
583
584 thread__put(thread);
585
586 return err;
587}
588
589int machine__process_namespaces_event(struct machine *machine __maybe_unused,
590 union perf_event *event,
591 struct perf_sample *sample __maybe_unused)
592{
593 struct thread *thread = machine__findnew_thread(machine,
594 event->namespaces.pid,
595 event->namespaces.tid);
596 int err = 0;
597
598 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
599 "\nWARNING: kernel seems to support more namespaces than perf"
600 " tool.\nTry updating the perf tool..\n\n");
601
602 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
603 "\nWARNING: perf tool seems to support more namespaces than"
604 " the kernel.\nTry updating the kernel..\n\n");
605
606 if (dump_trace)
607 perf_event__fprintf_namespaces(event, stdout);
608
609 if (thread == NULL ||
610 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
611 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
612 err = -1;
613 }
614
615 thread__put(thread);
616
617 return err;
618}
619
620int machine__process_cgroup_event(struct machine *machine,
621 union perf_event *event,
622 struct perf_sample *sample __maybe_unused)
623{
624 struct cgroup *cgrp;
625
626 if (dump_trace)
627 perf_event__fprintf_cgroup(event, stdout);
628
629 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
630 if (cgrp == NULL)
631 return -ENOMEM;
632
633 return 0;
634}
635
636int machine__process_lost_event(struct machine *machine __maybe_unused,
637 union perf_event *event, struct perf_sample *sample __maybe_unused)
638{
639 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
640 event->lost.id, event->lost.lost);
641 return 0;
642}
643
644int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
645 union perf_event *event, struct perf_sample *sample)
646{
647 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
648 sample->id, event->lost_samples.lost,
649 event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
650 return 0;
651}
652
653int machine__process_aux_event(struct machine *machine __maybe_unused,
654 union perf_event *event)
655{
656 if (dump_trace)
657 perf_event__fprintf_aux(event, stdout);
658 return 0;
659}
660
661int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
662 union perf_event *event)
663{
664 if (dump_trace)
665 perf_event__fprintf_itrace_start(event, stdout);
666 return 0;
667}
668
669int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
670 union perf_event *event)
671{
672 if (dump_trace)
673 perf_event__fprintf_aux_output_hw_id(event, stdout);
674 return 0;
675}
676
677int machine__process_switch_event(struct machine *machine __maybe_unused,
678 union perf_event *event)
679{
680 if (dump_trace)
681 perf_event__fprintf_switch(event, stdout);
682 return 0;
683}
684
685static int machine__process_ksymbol_register(struct machine *machine,
686 union perf_event *event,
687 struct perf_sample *sample __maybe_unused)
688{
689 struct symbol *sym;
690 struct dso *dso = NULL;
691 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
692 int err = 0;
693
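 /*
  * No existing kernel map covers this address: create a new dso and map
  * for the ksymbol (e.g. a BPF program or an out-of-line trampoline).
  */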
694 if (!map) {
695 dso = dso__new(event->ksymbol.name);
696
697 if (!dso) {
698 err = -ENOMEM;
699 goto out;
700 }
701 dso__set_kernel(dso, DSO_SPACE__KERNEL);
702 map = map__new2(0, dso);
703 if (!map) {
704 err = -ENOMEM;
705 goto out;
706 }
707 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
708 dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
709 dso__data(dso)->file_size = event->ksymbol.len;
710 dso__set_loaded(dso);
711 }
712
713 map__set_start(map, event->ksymbol.addr);
714 map__set_end(map, map__start(map) + event->ksymbol.len);
715 err = maps__insert(machine__kernel_maps(machine), map);
716 if (err) {
717 err = -ENOMEM;
718 goto out;
719 }
720
721 dso__set_loaded(dso);
722
723 if (is_bpf_image(event->ksymbol.name)) {
724 dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
725 dso__set_long_name(dso, "", false);
726 }
727 } else {
728 dso = dso__get(map__dso(map));
729 }
730
731 sym = symbol__new(map__map_ip(map, map__start(map)),
732 event->ksymbol.len,
733 0, 0, event->ksymbol.name);
734 if (!sym) {
735 err = -ENOMEM;
736 goto out;
737 }
738 dso__insert_symbol(dso, sym);
739out:
740 map__put(map);
741 dso__put(dso);
742 return err;
743}
744
745static int machine__process_ksymbol_unregister(struct machine *machine,
746 union perf_event *event,
747 struct perf_sample *sample __maybe_unused)
748{
749 struct symbol *sym;
750 struct map *map;
751
752 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
753 if (!map)
754 return 0;
755
756 if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
757 maps__remove(machine__kernel_maps(machine), map);
758 else {
759 struct dso *dso = map__dso(map);
760
761 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
762 if (sym)
763 dso__delete_symbol(dso, sym);
764 }
765 map__put(map);
766 return 0;
767}
768
769int machine__process_ksymbol(struct machine *machine,
770 union perf_event *event,
771 struct perf_sample *sample)
772{
773 if (dump_trace)
774 perf_event__fprintf_ksymbol(event, stdout);
775
776 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
777 return machine__process_ksymbol_unregister(machine, event,
778 sample);
779 return machine__process_ksymbol_register(machine, event, sample);
780}
781
782int machine__process_text_poke(struct machine *machine, union perf_event *event,
783 struct perf_sample *sample __maybe_unused)
784{
785 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
786 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
787 struct dso *dso = map ? map__dso(map) : NULL;
788
789 if (dump_trace)
790 perf_event__fprintf_text_poke(event, machine, stdout);
791
792 if (!event->text_poke.new_len)
793 goto out;
794
795 if (cpumode != PERF_RECORD_MISC_KERNEL) {
796 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
797 goto out;
798 }
799
800 if (dso) {
801 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
802 int ret;
803
804 /*
805 * Kernel maps might be changed when loading symbols so loading
806 * must be done prior to using kernel maps.
807 */
808 map__load(map);
809 ret = dso__data_write_cache_addr(dso, map, machine,
810 event->text_poke.addr,
811 new_bytes,
812 event->text_poke.new_len);
813 if (ret != event->text_poke.new_len)
814 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
815 event->text_poke.addr);
816 } else {
817 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
818 event->text_poke.addr);
819 }
820out:
821 map__put(map);
822 return 0;
823}
824
825static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
826 const char *filename)
827{
828 struct map *map = NULL;
829 struct kmod_path m;
830 struct dso *dso;
831 int err;
832
833 if (kmod_path__parse_name(&m, filename))
834 return NULL;
835
836 dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
837 if (dso == NULL)
838 goto out;
839
840 map = map__new2(start, dso);
841 if (map == NULL)
842 goto out;
843
844 err = maps__insert(machine__kernel_maps(machine), map);
845 /* If maps__insert failed, return NULL. */
846 if (err) {
847 map__put(map);
848 map = NULL;
849 }
850out:
851 /* put the dso here, corresponding to dsos__findnew_module_dso */
852 dso__put(dso);
853 zfree(&m.name);
854 return map;
855}
856
857size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
858{
859 struct rb_node *nd;
860 size_t ret = dsos__fprintf(&machines->host.dsos, fp);
861
862 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
863 struct machine *pos = rb_entry(nd, struct machine, rb_node);
864 ret += dsos__fprintf(&pos->dsos, fp);
865 }
866
867 return ret;
868}
869
870size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
871 bool (skip)(struct dso *dso, int parm), int parm)
872{
873 return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
874}
875
876size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
877 bool (skip)(struct dso *dso, int parm), int parm)
878{
879 struct rb_node *nd;
880 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
881
882 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
883 struct machine *pos = rb_entry(nd, struct machine, rb_node);
884 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
885 }
886 return ret;
887}
888
889size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
890{
891 int i;
892 size_t printed = 0;
893 struct dso *kdso = machine__kernel_dso(machine);
894
895 if (dso__has_build_id(kdso)) {
896 char filename[PATH_MAX];
897
898 if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
899 printed += fprintf(fp, "[0] %s\n", filename);
900 }
901
902 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
903 printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
904 vmlinux_path[i]);
905 }
906 return printed;
907}
908
909struct machine_fprintf_cb_args {
910 FILE *fp;
911 size_t printed;
912};
913
914static int machine_fprintf_cb(struct thread *thread, void *data)
915{
916 struct machine_fprintf_cb_args *args = data;
917
918 /* TODO: handle fprintf errors. */
919 args->printed += thread__fprintf(thread, args->fp);
920 return 0;
921}
922
923size_t machine__fprintf(struct machine *machine, FILE *fp)
924{
925 struct machine_fprintf_cb_args args = {
926 .fp = fp,
927 .printed = 0,
928 };
929 size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads));
930
931 machine__for_each_thread(machine, machine_fprintf_cb, &args);
932 return ret + args.printed;
933}
934
935static struct dso *machine__get_kernel(struct machine *machine)
936{
937 const char *vmlinux_name = machine->mmap_name;
938 struct dso *kernel;
939
940 if (machine__is_host(machine)) {
941 if (symbol_conf.vmlinux_name)
942 vmlinux_name = symbol_conf.vmlinux_name;
943
944 kernel = machine__findnew_kernel(machine, vmlinux_name,
945 "[kernel]", DSO_SPACE__KERNEL);
946 } else {
947 if (symbol_conf.default_guest_vmlinux_name)
948 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
949
950 kernel = machine__findnew_kernel(machine, vmlinux_name,
951 "[guest.kernel]",
952 DSO_SPACE__KERNEL_GUEST);
953 }
954
955 if (kernel != NULL && (!dso__has_build_id(kernel)))
956 dso__read_running_kernel_build_id(kernel, machine);
957
958 return kernel;
959}
960
961void machine__get_kallsyms_filename(struct machine *machine, char *buf,
962 size_t bufsz)
963{
964 if (machine__is_default_guest(machine))
965 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
966 else
967 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
968}
969
970const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
971
972/* Figure out the start address of kernel map from /proc/kallsyms.
973 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
974 * symbol_name if it's not that important.
975 */
976static int machine__get_running_kernel_start(struct machine *machine,
977 const char **symbol_name,
978 u64 *start, u64 *end)
979{
980 char filename[PATH_MAX];
981 int i, err = -1;
982 const char *name;
983 u64 addr = 0;
984
985 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
986
987 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
988 return 0;
989
990 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
991 err = kallsyms__get_function_start(filename, name, &addr);
992 if (!err)
993 break;
994 }
995
996 if (err)
997 return -1;
998
999 if (symbol_name)
1000 *symbol_name = name;
1001
1002 *start = addr;
1003
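 /*
  * Prefer _edata as the end of the kernel map; fall back to _etext when
  * _edata is not in kallsyms.
  */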
1004 err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1005 if (err)
1006 err = kallsyms__get_symbol_start(filename, "_etext", &addr);
1007 if (!err)
1008 *end = addr;
1009
1010 return 0;
1011}
1012
1013int machine__create_extra_kernel_map(struct machine *machine,
1014 struct dso *kernel,
1015 struct extra_kernel_map *xm)
1016{
1017 struct kmap *kmap;
1018 struct map *map;
1019 int err;
1020
1021 map = map__new2(xm->start, kernel);
1022 if (!map)
1023 return -ENOMEM;
1024
1025 map__set_end(map, xm->end);
1026 map__set_pgoff(map, xm->pgoff);
1027
1028 kmap = map__kmap(map);
1029
1030 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1031
1032 err = maps__insert(machine__kernel_maps(machine), map);
1033
1034 if (!err) {
1035 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1036 kmap->name, map__start(map), map__end(map));
1037 }
1038
1039 map__put(map);
1040
1041 return err;
1042}
1043
1044static u64 find_entry_trampoline(struct dso *dso)
1045{
1046 /* Duplicates are removed so lookup all aliases */
1047 const char *syms[] = {
1048 "_entry_trampoline",
1049 "__entry_trampoline_start",
1050 "entry_SYSCALL_64_trampoline",
1051 };
1052 struct symbol *sym = dso__first_symbol(dso);
1053 unsigned int i;
1054
1055 for (; sym; sym = dso__next_symbol(sym)) {
1056 if (sym->binding != STB_GLOBAL)
1057 continue;
1058 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1059 if (!strcmp(sym->name, syms[i]))
1060 return sym->start;
1061 }
1062 }
1063
1064 return 0;
1065}
1066
1067/*
1068 * These values can be used for kernels that do not have symbols for the entry
1069 * trampolines in kallsyms.
1070 */
1071#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1072#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1073#define X86_64_ENTRY_TRAMPOLINE 0x6000
1074
1075struct machine__map_x86_64_entry_trampolines_args {
1076 struct maps *kmaps;
1077 bool found;
1078};
1079
1080static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1081{
1082 struct machine__map_x86_64_entry_trampolines_args *args = data;
1083 struct map *dest_map;
1084 struct kmap *kmap = __map__kmap(map);
1085
1086 if (!kmap || !is_entry_trampoline(kmap->name))
1087 return 0;
1088
1089 dest_map = maps__find(args->kmaps, map__pgoff(map));
1090 if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
1091 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1092
1093 map__put(dest_map);
1094 args->found = true;
1095 return 0;
1096}
1097
1098/* Map x86_64 PTI entry trampolines */
1099int machine__map_x86_64_entry_trampolines(struct machine *machine,
1100 struct dso *kernel)
1101{
1102 struct machine__map_x86_64_entry_trampolines_args args = {
1103 .kmaps = machine__kernel_maps(machine),
1104 .found = false,
1105 };
1106 int nr_cpus_avail, cpu;
1107 u64 pgoff;
1108
1109 /*
1110 * In the vmlinux case, pgoff is a virtual address which must now be
1111 * mapped to a vmlinux offset.
1112 */
1113 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1114
1115 if (args.found || machine->trampolines_mapped)
1116 return 0;
1117
1118 pgoff = find_entry_trampoline(kernel);
1119 if (!pgoff)
1120 return 0;
1121
1122 nr_cpus_avail = machine__nr_cpus_avail(machine);
1123
1124 /* Add a 1 page map for each CPU's entry trampoline */
1125 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1126 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1127 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1128 X86_64_ENTRY_TRAMPOLINE;
1129 struct extra_kernel_map xm = {
1130 .start = va,
1131 .end = va + page_size,
1132 .pgoff = pgoff,
1133 };
1134
1135 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1136
1137 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1138 return -1;
1139 }
1140
1141 machine->trampolines_mapped = nr_cpus_avail;
1142
1143 return 0;
1144}
1145
1146int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1147 struct dso *kernel __maybe_unused)
1148{
1149 return 0;
1150}
1151
1152static int
1153__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1154{
1155 /* If the kernel map is being renewed, destroy the previous one */
1156 machine__destroy_kernel_maps(machine);
1157
1158 map__put(machine->vmlinux_map);
1159 machine->vmlinux_map = map__new2(0, kernel);
1160 if (machine->vmlinux_map == NULL)
1161 return -ENOMEM;
1162
1163 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1164 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1165}
1166
1167void machine__destroy_kernel_maps(struct machine *machine)
1168{
1169 struct kmap *kmap;
1170 struct map *map = machine__kernel_map(machine);
1171
1172 if (map == NULL)
1173 return;
1174
1175 kmap = map__kmap(map);
1176 maps__remove(machine__kernel_maps(machine), map);
1177 if (kmap && kmap->ref_reloc_sym) {
1178 zfree((char **)&kmap->ref_reloc_sym->name);
1179 zfree(&kmap->ref_reloc_sym);
1180 }
1181
1182 map__zput(machine->vmlinux_map);
1183}
1184
1185int machines__create_guest_kernel_maps(struct machines *machines)
1186{
1187 int ret = 0;
1188 struct dirent **namelist = NULL;
1189 int i, items = 0;
1190 char path[PATH_MAX];
1191 pid_t pid;
1192 char *endp;
1193
1194 if (symbol_conf.default_guest_vmlinux_name ||
1195 symbol_conf.default_guest_modules ||
1196 symbol_conf.default_guest_kallsyms) {
1197 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1198 }
1199
1200 if (symbol_conf.guestmount) {
1201 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1202 if (items <= 0)
1203 return -ENOENT;
1204 for (i = 0; i < items; i++) {
1205 if (!isdigit(namelist[i]->d_name[0])) {
1206 /* Filter out . and .. as well as any other non-numeric entries */
1207 continue;
1208 }
1209 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1210 if ((*endp != '\0') ||
1211 (endp == namelist[i]->d_name) ||
1212 (errno == ERANGE)) {
1213 pr_debug("invalid directory (%s). Skipping.\n",
1214 namelist[i]->d_name);
1215 continue;
1216 }
1217 sprintf(path, "%s/%s/proc/kallsyms",
1218 symbol_conf.guestmount,
1219 namelist[i]->d_name);
1220 ret = access(path, R_OK);
1221 if (ret) {
1222 pr_debug("Can't access file %s\n", path);
1223 goto failure;
1224 }
1225 machines__create_kernel_maps(machines, pid);
1226 }
1227failure:
1228 free(namelist);
1229 }
1230
1231 return ret;
1232}
1233
1234void machines__destroy_kernel_maps(struct machines *machines)
1235{
1236 struct rb_node *next = rb_first_cached(&machines->guests);
1237
1238 machine__destroy_kernel_maps(&machines->host);
1239
1240 while (next) {
1241 struct machine *pos = rb_entry(next, struct machine, rb_node);
1242
1243 next = rb_next(&pos->rb_node);
1244 rb_erase_cached(&pos->rb_node, &machines->guests);
1245 machine__delete(pos);
1246 }
1247}
1248
1249int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1250{
1251 struct machine *machine = machines__findnew(machines, pid);
1252
1253 if (machine == NULL)
1254 return -1;
1255
1256 return machine__create_kernel_maps(machine);
1257}
1258
1259int machine__load_kallsyms(struct machine *machine, const char *filename)
1260{
1261 struct map *map = machine__kernel_map(machine);
1262 struct dso *dso = map__dso(map);
1263 int ret = __dso__load_kallsyms(dso, filename, map, true);
1264
1265 if (ret > 0) {
1266 dso__set_loaded(dso);
1267 /*
1268 * Since /proc/kallsyms will have multiple sections for the
1269 * kernel, with modules between them, fix up the end of all
1270 * sections.
1271 */
1272 maps__fixup_end(machine__kernel_maps(machine));
1273 }
1274
1275 return ret;
1276}
1277
1278int machine__load_vmlinux_path(struct machine *machine)
1279{
1280 struct map *map = machine__kernel_map(machine);
1281 struct dso *dso = map__dso(map);
1282 int ret = dso__load_vmlinux_path(dso, map);
1283
1284 if (ret > 0)
1285 dso__set_loaded(dso);
1286
1287 return ret;
1288}
1289
1290static char *get_kernel_version(const char *root_dir)
1291{
1292 char version[PATH_MAX];
1293 FILE *file;
1294 char *name, *tmp;
1295 const char *prefix = "Linux version ";
1296
1297 sprintf(version, "%s/proc/version", root_dir);
1298 file = fopen(version, "r");
1299 if (!file)
1300 return NULL;
1301
1302 tmp = fgets(version, sizeof(version), file);
1303 fclose(file);
1304 if (!tmp)
1305 return NULL;
1306
1307 name = strstr(version, prefix);
1308 if (!name)
1309 return NULL;
1310 name += strlen(prefix);
1311 tmp = strchr(name, ' ');
1312 if (tmp)
1313 *tmp = '\0';
1314
1315 return strdup(name);
1316}
1317
1318static bool is_kmod_dso(struct dso *dso)
1319{
1320 return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1321 dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
1322}
1323
1324static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1325{
1326 char *long_name;
1327 struct dso *dso;
1328 struct map *map = maps__find_by_name(maps, m->name);
1329
1330 if (map == NULL)
1331 return 0;
1332
1333 long_name = strdup(path);
1334 if (long_name == NULL) {
1335 map__put(map);
1336 return -ENOMEM;
1337 }
1338
1339 dso = map__dso(map);
1340 dso__set_long_name(dso, long_name, true);
1341 dso__kernel_module_get_build_id(dso, "");
1342
1343 /*
1344 * The full name can reveal kmod compression, so
1345 * update the symtab_type if needed.
1346 */
1347 if (m->comp && is_kmod_dso(dso)) {
1348 dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
1349 dso__set_comp(dso, m->comp);
1350 }
1351 map__put(map);
1352 return 0;
1353}
1354
1355static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1356{
1357 struct dirent *dent;
1358 DIR *dir = opendir(dir_name);
1359 int ret = 0;
1360
1361 if (!dir) {
1362 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1363 return -1;
1364 }
1365
1366 while ((dent = readdir(dir)) != NULL) {
1367 char path[PATH_MAX];
1368 struct stat st;
1369
1370 /* sshfs might return a bad dent->d_type, so we have to stat */
1371 path__join(path, sizeof(path), dir_name, dent->d_name);
1372 if (stat(path, &st))
1373 continue;
1374
1375 if (S_ISDIR(st.st_mode)) {
1376 if (!strcmp(dent->d_name, ".") ||
1377 !strcmp(dent->d_name, ".."))
1378 continue;
1379
1380 /* Do not follow top-level source and build symlinks */
1381 if (depth == 0) {
1382 if (!strcmp(dent->d_name, "source") ||
1383 !strcmp(dent->d_name, "build"))
1384 continue;
1385 }
1386
1387 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1388 if (ret < 0)
1389 goto out;
1390 } else {
1391 struct kmod_path m;
1392
1393 ret = kmod_path__parse_name(&m, dent->d_name);
1394 if (ret)
1395 goto out;
1396
1397 if (m.kmod)
1398 ret = maps__set_module_path(maps, path, &m);
1399
1400 zfree(&m.name);
1401
1402 if (ret)
1403 goto out;
1404 }
1405 }
1406
1407out:
1408 closedir(dir);
1409 return ret;
1410}
1411
1412static int machine__set_modules_path(struct machine *machine)
1413{
1414 char *version;
1415 char modules_path[PATH_MAX];
1416
1417 version = get_kernel_version(machine->root_dir);
1418 if (!version)
1419 return -1;
1420
1421 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1422 machine->root_dir, version);
1423 free(version);
1424
1425 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1426}
1427int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1428 u64 *size __maybe_unused,
1429 const char *name __maybe_unused)
1430{
1431 return 0;
1432}
1433
1434static int machine__create_module(void *arg, const char *name, u64 start,
1435 u64 size)
1436{
1437 struct machine *machine = arg;
1438 struct map *map;
1439
1440 if (arch__fix_module_text_start(&start, &size, name) < 0)
1441 return -1;
1442
1443 map = machine__addnew_module_map(machine, start, name);
1444 if (map == NULL)
1445 return -1;
1446 map__set_end(map, start + size);
1447
1448 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1449 map__put(map);
1450 return 0;
1451}
1452
1453static int machine__create_modules(struct machine *machine)
1454{
1455 const char *modules;
1456 char path[PATH_MAX];
1457
1458 if (machine__is_default_guest(machine)) {
1459 modules = symbol_conf.default_guest_modules;
1460 } else {
1461 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1462 modules = path;
1463 }
1464
1465 if (symbol__restricted_filename(modules, "/proc/modules"))
1466 return -1;
1467
1468 if (modules__parse(modules, machine, machine__create_module))
1469 return -1;
1470
1471 if (!machine__set_modules_path(machine))
1472 return 0;
1473
1474 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1475
1476 return 0;
1477}
1478
1479static void machine__set_kernel_mmap(struct machine *machine,
1480 u64 start, u64 end)
1481{
1482 map__set_start(machine->vmlinux_map, start);
1483 map__set_end(machine->vmlinux_map, end);
1484 /*
1485 * Be a bit paranoid here: some perf.data files came with
1486 * a zero-sized synthesized MMAP event for the kernel.
1487 */
1488 if (start == 0 && end == 0)
1489 map__set_end(machine->vmlinux_map, ~0ULL);
1490}
1491
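/*
 * Update the address range of the kernel map. The map is removed and
 * re-inserted rather than resized in place so that the kernel maps stay
 * correctly sorted by address.
 */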
1492static int machine__update_kernel_mmap(struct machine *machine,
1493 u64 start, u64 end)
1494{
1495 struct map *orig, *updated;
1496 int err;
1497
1498 orig = machine->vmlinux_map;
1499 updated = map__get(orig);
1500
1501 machine->vmlinux_map = updated;
1502 maps__remove(machine__kernel_maps(machine), orig);
1503 machine__set_kernel_mmap(machine, start, end);
1504 err = maps__insert(machine__kernel_maps(machine), updated);
1505 map__put(orig);
1506
1507 return err;
1508}
1509
1510int machine__create_kernel_maps(struct machine *machine)
1511{
1512 struct dso *kernel = machine__get_kernel(machine);
1513 const char *name = NULL;
1514 u64 start = 0, end = ~0ULL;
1515 int ret;
1516
1517 if (kernel == NULL)
1518 return -1;
1519
1520 ret = __machine__create_kernel_maps(machine, kernel);
1521 if (ret < 0)
1522 goto out_put;
1523
1524 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1525 if (machine__is_host(machine))
1526 pr_debug("Problems creating module maps, "
1527 "continuing anyway...\n");
1528 else
1529 pr_debug("Problems creating module maps for guest %d, "
1530 "continuing anyway...\n", machine->pid);
1531 }
1532
1533 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1534 if (name &&
1535 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1536 machine__destroy_kernel_maps(machine);
1537 ret = -1;
1538 goto out_put;
1539 }
1540
1541 /*
1542 * We have a real start address now, so re-order the kmaps;
1543 * assume it's the last in the kmaps.
1544 */
1545 ret = machine__update_kernel_mmap(machine, start, end);
1546 if (ret < 0)
1547 goto out_put;
1548 }
1549
1550 if (machine__create_extra_kernel_maps(machine, kernel))
1551 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1552
1553 if (end == ~0ULL) {
1554 /* update end address of the kernel map using adjacent module address */
1555 struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1556 machine__kernel_map(machine));
1557
1558 if (next) {
1559 machine__set_kernel_mmap(machine, start, map__start(next));
1560 map__put(next);
1561 }
1562 }
1563
1564out_put:
1565 dso__put(kernel);
1566 return ret;
1567}
1568
1569static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
1570{
1571 return dso__is_kcore(dso) ? 1 : 0;
1572}
1573
1574static bool machine__uses_kcore(struct machine *machine)
1575{
1576 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0;
1577}
1578
1579static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1580 struct extra_kernel_map *xm)
1581{
1582 return machine__is(machine, "x86_64") &&
1583 is_entry_trampoline(xm->name);
1584}
1585
1586static int machine__process_extra_kernel_map(struct machine *machine,
1587 struct extra_kernel_map *xm)
1588{
1589 struct dso *kernel = machine__kernel_dso(machine);
1590
1591 if (kernel == NULL)
1592 return -1;
1593
1594 return machine__create_extra_kernel_map(machine, kernel, xm);
1595}
1596
1597static int machine__process_kernel_mmap_event(struct machine *machine,
1598 struct extra_kernel_map *xm,
1599 struct build_id *bid)
1600{
1601 enum dso_space_type dso_space;
1602 bool is_kernel_mmap;
1603 const char *mmap_name = machine->mmap_name;
1604
1605 /* If we have maps from kcore then we do not need or want any others */
1606 if (machine__uses_kcore(machine))
1607 return 0;
1608
1609 if (machine__is_host(machine))
1610 dso_space = DSO_SPACE__KERNEL;
1611 else
1612 dso_space = DSO_SPACE__KERNEL_GUEST;
1613
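 /*
  * Kernel mmap events are typically named after the mmap name with the
  * ref reloc symbol appended, e.g. "[kernel.kallsyms]_text", so only the
  * prefix (minus the trailing ']') is compared here.
  */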
1614 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1615 if (!is_kernel_mmap && !machine__is_host(machine)) {
1616 /*
1617 * If the event was recorded inside the guest and injected into
1618 * the host perf.data file, then it will match a host mmap_name,
1619 * so try that - see machine__set_mmap_name().
1620 */
1621 mmap_name = "[kernel.kallsyms]";
1622 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1623 }
1624 if (xm->name[0] == '/' ||
1625 (!is_kernel_mmap && xm->name[0] == '[')) {
1626 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1627
1628 if (map == NULL)
1629 goto out_problem;
1630
1631 map__set_end(map, map__start(map) + xm->end - xm->start);
1632
1633 if (build_id__is_defined(bid))
1634 dso__set_build_id(map__dso(map), bid);
1635
1636 map__put(map);
1637 } else if (is_kernel_mmap) {
1638 const char *symbol_name = xm->name + strlen(mmap_name);
1639 /*
1640 * Should be there already, from the build-id table in
1641 * the header.
1642 */
1643 struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
1644
1645 if (kernel == NULL)
1646 kernel = machine__findnew_dso(machine, machine->mmap_name);
1647 if (kernel == NULL)
1648 goto out_problem;
1649
1650 dso__set_kernel(kernel, dso_space);
1651 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1652 dso__put(kernel);
1653 goto out_problem;
1654 }
1655
1656 if (strstr(dso__long_name(kernel), "vmlinux"))
1657 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1658
1659 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1660 dso__put(kernel);
1661 goto out_problem;
1662 }
1663
1664 if (build_id__is_defined(bid))
1665 dso__set_build_id(kernel, bid);
1666
1667 /*
1668 * Avoid using a zero address (kptr_restrict) for the ref reloc
1669 * symbol. Effectively having zero here means that at record
1670 * time /proc/sys/kernel/kptr_restrict was non zero.
1671 */
1672 if (xm->pgoff != 0) {
1673 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1674 symbol_name,
1675 xm->pgoff);
1676 }
1677
1678 if (machine__is_default_guest(machine)) {
1679 /*
1680 * preload dso of guest kernel and modules
1681 */
1682 dso__load(kernel, machine__kernel_map(machine));
1683 }
1684 dso__put(kernel);
1685 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1686 return machine__process_extra_kernel_map(machine, xm);
1687 }
1688 return 0;
1689out_problem:
1690 return -1;
1691}
1692
1693int machine__process_mmap2_event(struct machine *machine,
1694 union perf_event *event,
1695 struct perf_sample *sample)
1696{
1697 struct thread *thread;
1698 struct map *map;
1699 struct dso_id dso_id = {
1700 .maj = event->mmap2.maj,
1701 .min = event->mmap2.min,
1702 .ino = event->mmap2.ino,
1703 .ino_generation = event->mmap2.ino_generation,
1704 };
1705 struct build_id __bid, *bid = NULL;
1706 int ret = 0;
1707
1708 if (dump_trace)
1709 perf_event__fprintf_mmap2(event, stdout);
1710
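 /*
  * When PERF_RECORD_MISC_MMAP_BUILD_ID is set, the event carries a build
  * ID instead of the device/inode information.
  */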
1711 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1712 bid = &__bid;
1713 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1714 }
1715
1716 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1717 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1718 struct extra_kernel_map xm = {
1719 .start = event->mmap2.start,
1720 .end = event->mmap2.start + event->mmap2.len,
1721 .pgoff = event->mmap2.pgoff,
1722 };
1723
1724 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1725 ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1726 if (ret < 0)
1727 goto out_problem;
1728 return 0;
1729 }
1730
1731 thread = machine__findnew_thread(machine, event->mmap2.pid,
1732 event->mmap2.tid);
1733 if (thread == NULL)
1734 goto out_problem;
1735
1736 map = map__new(machine, event->mmap2.start,
1737 event->mmap2.len, event->mmap2.pgoff,
1738 &dso_id, event->mmap2.prot,
1739 event->mmap2.flags, bid,
1740 event->mmap2.filename, thread);
1741
1742 if (map == NULL)
1743 goto out_problem_map;
1744
1745 ret = thread__insert_map(thread, map);
1746 if (ret)
1747 goto out_problem_insert;
1748
1749 thread__put(thread);
1750 map__put(map);
1751 return 0;
1752
1753out_problem_insert:
1754 map__put(map);
1755out_problem_map:
1756 thread__put(thread);
1757out_problem:
1758 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1759 return 0;
1760}
1761
1762int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1763 struct perf_sample *sample)
1764{
1765 struct thread *thread;
1766 struct map *map;
1767 u32 prot = 0;
1768 int ret = 0;
1769
1770 if (dump_trace)
1771 perf_event__fprintf_mmap(event, stdout);
1772
1773 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1774 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1775 struct extra_kernel_map xm = {
1776 .start = event->mmap.start,
1777 .end = event->mmap.start + event->mmap.len,
1778 .pgoff = event->mmap.pgoff,
1779 };
1780
1781 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1782 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1783 if (ret < 0)
1784 goto out_problem;
1785 return 0;
1786 }
1787
1788 thread = machine__findnew_thread(machine, event->mmap.pid,
1789 event->mmap.tid);
1790 if (thread == NULL)
1791 goto out_problem;
1792
1793 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1794 prot = PROT_EXEC;
1795
1796 map = map__new(machine, event->mmap.start,
1797 event->mmap.len, event->mmap.pgoff,
1798 NULL, prot, 0, NULL, event->mmap.filename, thread);
1799
1800 if (map == NULL)
1801 goto out_problem_map;
1802
1803 ret = thread__insert_map(thread, map);
1804 if (ret)
1805 goto out_problem_insert;
1806
1807 thread__put(thread);
1808 map__put(map);
1809 return 0;
1810
1811out_problem_insert:
1812 map__put(map);
1813out_problem_map:
1814 thread__put(thread);
1815out_problem:
1816 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1817 return 0;
1818}
1819
1820void machine__remove_thread(struct machine *machine, struct thread *th)
1821{
1822 return threads__remove(&machine->threads, th);
1823}
1824
1825int machine__process_fork_event(struct machine *machine, union perf_event *event,
1826 struct perf_sample *sample)
1827{
1828 struct thread *thread = machine__find_thread(machine,
1829 event->fork.pid,
1830 event->fork.tid);
1831 struct thread *parent = machine__findnew_thread(machine,
1832 event->fork.ppid,
1833 event->fork.ptid);
1834 bool do_maps_clone = true;
1835 int err = 0;
1836
1837 if (dump_trace)
1838 perf_event__fprintf_task(event, stdout);
1839
1840 /*
1841 * There may be an existing thread that is not actually the parent,
1842 * either because we are processing events out of order, or because the
1843 * (fork) event that would have removed the thread was lost. Assume the
1844 * latter case and continue on as best we can.
1845 */
1846 if (thread__pid(parent) != (pid_t)event->fork.ppid) {
1847 dump_printf("removing erroneous parent thread %d/%d\n",
1848 thread__pid(parent), thread__tid(parent));
1849 machine__remove_thread(machine, parent);
1850 thread__put(parent);
1851 parent = machine__findnew_thread(machine, event->fork.ppid,
1852 event->fork.ptid);
1853 }
1854
1855 /* if a thread currently exists for the thread id remove it */
1856 if (thread != NULL) {
1857 machine__remove_thread(machine, thread);
1858 thread__put(thread);
1859 }
1860
1861 thread = machine__findnew_thread(machine, event->fork.pid,
1862 event->fork.tid);
1863 /*
1864 * When synthesizing FORK events, we are trying to create thread
1865 * objects for the already running tasks on the machine.
1866 *
1867 * Normally, for a kernel FORK event, we want to clone the parent's
1868 * maps because that is what the kernel just did.
1869 *
1870 * But when synthesizing, this should not be done. If we do, we end up
1871 * with overlapping maps as we process the synthesized MMAP2 events that
1872 * get delivered shortly thereafter.
1873 *
1874 * Use the FORK event misc flags in an internal way to signal this
1875 * situation, so we can elide the map clone when appropriate.
1876 */
1877 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1878 do_maps_clone = false;
1879
1880 if (thread == NULL || parent == NULL ||
1881 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1882 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1883 err = -1;
1884 }
1885 thread__put(thread);
1886 thread__put(parent);
1887
1888 return err;
1889}
1890
1891int machine__process_exit_event(struct machine *machine, union perf_event *event,
1892 struct perf_sample *sample __maybe_unused)
1893{
1894 struct thread *thread = machine__find_thread(machine,
1895 event->fork.pid,
1896 event->fork.tid);
1897
1898 if (dump_trace)
1899 perf_event__fprintf_task(event, stdout);
1900
1901 if (thread != NULL) {
1902 if (symbol_conf.keep_exited_threads)
1903 thread__set_exited(thread, /*exited=*/true);
1904 else
1905 machine__remove_thread(machine, thread);
1906 }
1907 thread__put(thread);
1908 return 0;
1909}
1910
1911int machine__process_event(struct machine *machine, union perf_event *event,
1912 struct perf_sample *sample)
1913{
1914 int ret;
1915
1916 switch (event->header.type) {
1917 case PERF_RECORD_COMM:
1918 ret = machine__process_comm_event(machine, event, sample); break;
1919 case PERF_RECORD_MMAP:
1920 ret = machine__process_mmap_event(machine, event, sample); break;
1921 case PERF_RECORD_NAMESPACES:
1922 ret = machine__process_namespaces_event(machine, event, sample); break;
1923 case PERF_RECORD_CGROUP:
1924 ret = machine__process_cgroup_event(machine, event, sample); break;
1925 case PERF_RECORD_MMAP2:
1926 ret = machine__process_mmap2_event(machine, event, sample); break;
1927 case PERF_RECORD_FORK:
1928 ret = machine__process_fork_event(machine, event, sample); break;
1929 case PERF_RECORD_EXIT:
1930 ret = machine__process_exit_event(machine, event, sample); break;
1931 case PERF_RECORD_LOST:
1932 ret = machine__process_lost_event(machine, event, sample); break;
1933 case PERF_RECORD_AUX:
1934 ret = machine__process_aux_event(machine, event); break;
1935 case PERF_RECORD_ITRACE_START:
1936 ret = machine__process_itrace_start_event(machine, event); break;
1937 case PERF_RECORD_LOST_SAMPLES:
1938 ret = machine__process_lost_samples_event(machine, event, sample); break;
1939 case PERF_RECORD_SWITCH:
1940 case PERF_RECORD_SWITCH_CPU_WIDE:
1941 ret = machine__process_switch_event(machine, event); break;
1942 case PERF_RECORD_KSYMBOL:
1943 ret = machine__process_ksymbol(machine, event, sample); break;
1944 case PERF_RECORD_BPF_EVENT:
1945 ret = machine__process_bpf(machine, event, sample); break;
1946 case PERF_RECORD_TEXT_POKE:
1947 ret = machine__process_text_poke(machine, event, sample); break;
1948 case PERF_RECORD_AUX_OUTPUT_HW_ID:
1949 ret = machine__process_aux_output_hw_id_event(machine, event); break;
1950 default:
1951 ret = -1;
1952 break;
1953 }
1954
1955 return ret;
1956}
1957
1958static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1959{
1960 return regexec(regex, sym->name, 0, NULL, 0) == 0;
1961}
1962
1963static void ip__resolve_ams(struct thread *thread,
1964 struct addr_map_symbol *ams,
1965 u64 ip)
1966{
1967 struct addr_location al;
1968
1969 addr_location__init(&al);
1970 /*
1971 * We cannot use the header.misc hint to determine whether a
1972 * branch stack address is user, kernel, guest, hypervisor.
1973 * Branches may straddle the kernel/user/hypervisor boundaries.
1974 * Thus, we have to try each cpumode consecutively until we find a match;
1975 * otherwise the symbol remains unknown.
1976 */
1977 thread__find_cpumode_addr_location(thread, ip, &al);
1978
1979 ams->addr = ip;
1980 ams->al_addr = al.addr;
1981 ams->al_level = al.level;
1982 ams->ms.maps = maps__get(al.maps);
1983 ams->ms.sym = al.sym;
1984 ams->ms.map = map__get(al.map);
1985 ams->phys_addr = 0;
1986 ams->data_page_size = 0;
1987 addr_location__exit(&al);
1988}
1989
1990static void ip__resolve_data(struct thread *thread,
1991 u8 m, struct addr_map_symbol *ams,
1992 u64 addr, u64 phys_addr, u64 daddr_page_size)
1993{
1994 struct addr_location al;
1995
1996 addr_location__init(&al);
1997
1998 thread__find_symbol(thread, m, addr, &al);
1999
2000 ams->addr = addr;
2001 ams->al_addr = al.addr;
2002 ams->al_level = al.level;
2003 ams->ms.maps = maps__get(al.maps);
2004 ams->ms.sym = al.sym;
2005 ams->ms.map = map__get(al.map);
2006 ams->phys_addr = phys_addr;
2007 ams->data_page_size = daddr_page_size;
2008 addr_location__exit(&al);
2009}
2010
2011struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2012 struct addr_location *al)
2013{
2014 struct mem_info *mi = mem_info__new();
2015
2016 if (!mi)
2017 return NULL;
2018
2019 ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
2020 ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
2021 sample->addr, sample->phys_addr,
2022 sample->data_page_size);
2023 mem_info__data_src(mi)->val = sample->data_src;
2024
2025 return mi;
2026}
2027
2028static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2029{
2030 struct map *map = ms->map;
2031 char *srcline = NULL;
2032 struct dso *dso;
2033
2034 if (!map || callchain_param.key == CCKEY_FUNCTION)
2035 return srcline;
2036
2037 dso = map__dso(map);
2038 srcline = srcline__tree_find(dso__srclines(dso), ip);
2039 if (!srcline) {
2040 bool show_sym = false;
2041 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2042
2043 srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2044 ms->sym, show_sym, show_addr, ip);
2045 srcline__tree_insert(dso__srclines(dso), ip, srcline);
2046 }
2047
2048 return srcline;
2049}
2050
2051struct iterations {
2052 int nr_loop_iter;
2053 u64 cycles;
2054};
2055
2056static int add_callchain_ip(struct thread *thread,
2057 struct callchain_cursor *cursor,
2058 struct symbol **parent,
2059 struct addr_location *root_al,
2060 u8 *cpumode,
2061 u64 ip,
2062 bool branch,
2063 struct branch_flags *flags,
2064 struct iterations *iter,
2065 u64 branch_from,
2066 bool symbols)
2067{
2068 struct map_symbol ms = {};
2069 struct addr_location al;
2070 int nr_loop_iter = 0, err = 0;
2071 u64 iter_cycles = 0;
2072 const char *srcline = NULL;
2073
2074 addr_location__init(&al);
2075 al.filtered = 0;
2076 al.sym = NULL;
2077 al.srcline = NULL;
2078 if (!cpumode) {
2079 thread__find_cpumode_addr_location(thread, ip, &al);
2080 } else {
2081 if (ip >= PERF_CONTEXT_MAX) {
2082 switch (ip) {
2083 case PERF_CONTEXT_HV:
2084 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2085 break;
2086 case PERF_CONTEXT_KERNEL:
2087 *cpumode = PERF_RECORD_MISC_KERNEL;
2088 break;
2089 case PERF_CONTEXT_USER:
2090 *cpumode = PERF_RECORD_MISC_USER;
2091 break;
2092 default:
2093 pr_debug("invalid callchain context: "
2094 "%"PRId64"\n", (s64) ip);
2095 /*
2096 * It seems the callchain is corrupted.
2097 * Discard all.
2098 */
2099 callchain_cursor_reset(cursor);
2100 err = 1;
2101 goto out;
2102 }
2103 goto out;
2104 }
2105 if (symbols)
2106 thread__find_symbol(thread, *cpumode, ip, &al);
2107 }
2108
2109 if (al.sym != NULL) {
2110 if (perf_hpp_list.parent && !*parent &&
2111 symbol__match_regex(al.sym, &parent_regex))
2112 *parent = al.sym;
2113 else if (have_ignore_callees && root_al &&
2114 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2115 /* Treat this symbol as the root,
2116 forgetting its callees. */
2117 addr_location__copy(root_al, &al);
2118 callchain_cursor_reset(cursor);
2119 }
2120 }
2121
2122 if (symbol_conf.hide_unresolved && al.sym == NULL)
2123 goto out;
2124
2125 if (iter) {
2126 nr_loop_iter = iter->nr_loop_iter;
2127 iter_cycles = iter->cycles;
2128 }
2129
2130 ms.maps = maps__get(al.maps);
2131 ms.map = map__get(al.map);
2132 ms.sym = al.sym;
2133 srcline = callchain_srcline(&ms, al.addr);
2134 err = callchain_cursor_append(cursor, ip, &ms,
2135 branch, flags, nr_loop_iter,
2136 iter_cycles, branch_from, srcline);
2137out:
2138 addr_location__exit(&al);
2139 map_symbol__exit(&ms);
2140 return err;
2141}
2142
2143struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2144 struct addr_location *al)
2145{
2146 unsigned int i;
2147 const struct branch_stack *bs = sample->branch_stack;
2148 struct branch_entry *entries = perf_sample__branch_entries(sample);
2149 u64 *branch_stack_cntr = sample->branch_stack_cntr;
2150 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2151
2152 if (!bi)
2153 return NULL;
2154
2155 for (i = 0; i < bs->nr; i++) {
2156 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2157 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2158 bi[i].flags = entries[i].flags;
2159 if (branch_stack_cntr)
2160 bi[i].branch_stack_cntr = branch_stack_cntr[i];
2161 }
2162 return bi;
2163}
2164
2165static void save_iterations(struct iterations *iter,
2166 struct branch_entry *be, int nr)
2167{
2168 int i;
2169
2170 iter->nr_loop_iter++;
2171 iter->cycles = 0;
2172
2173 for (i = 0; i < nr; i++)
2174 iter->cycles += be[i].flags.cycles;
2175}
2176
2177#define CHASHSZ 127
2178#define CHASHBITS 7
2179#define NO_ENTRY 0xff
2180
2181#define PERF_MAX_BRANCH_DEPTH 127
2182
2183/* Remove loops. */
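/*
 * Each branch 'from' address is hashed; when the same address reappears and
 * the entries in between repeat, the run is treated as loop iterations and
 * collapsed, with the iteration count and cycles accumulated in 'iter'.
 */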
2184static int remove_loops(struct branch_entry *l, int nr,
2185 struct iterations *iter)
2186{
2187 int i, j, off;
2188 unsigned char chash[CHASHSZ];
2189
2190 memset(chash, NO_ENTRY, sizeof(chash));
2191
2192 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2193
2194 for (i = 0; i < nr; i++) {
2195 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2196
2197 /* no collision handling for now */
2198 if (chash[h] == NO_ENTRY) {
2199 chash[h] = i;
2200 } else if (l[chash[h]].from == l[i].from) {
2201 bool is_loop = true;
2202 /* check if it is a real loop */
2203 off = 0;
2204 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2205 if (l[j].from != l[i + off].from) {
2206 is_loop = false;
2207 break;
2208 }
2209 if (is_loop) {
2210 j = nr - (i + off);
2211 if (j > 0) {
2212 save_iterations(iter + i + off,
2213 l + i, off);
2214
2215 memmove(iter + i, iter + i + off,
2216 j * sizeof(*iter));
2217
2218 memmove(l + i, l + i + off,
2219 j * sizeof(*l));
2220 }
2221
2222 nr -= off;
2223 }
2224 }
2225 }
2226 return nr;
2227}
2228
2229static int lbr_callchain_add_kernel_ip(struct thread *thread,
2230 struct callchain_cursor *cursor,
2231 struct perf_sample *sample,
2232 struct symbol **parent,
2233 struct addr_location *root_al,
2234 u64 branch_from,
2235 bool callee, int end,
2236 bool symbols)
2237{
2238 struct ip_callchain *chain = sample->callchain;
2239 u8 cpumode = PERF_RECORD_MISC_USER;
2240 int err, i;
2241
2242 if (callee) {
2243 for (i = 0; i < end + 1; i++) {
2244 err = add_callchain_ip(thread, cursor, parent,
2245 root_al, &cpumode, chain->ips[i],
2246 false, NULL, NULL, branch_from,
2247 symbols);
2248 if (err)
2249 return err;
2250 }
2251 return 0;
2252 }
2253
2254 for (i = end; i >= 0; i--) {
2255 err = add_callchain_ip(thread, cursor, parent,
2256 root_al, &cpumode, chain->ips[i],
2257 false, NULL, NULL, branch_from,
2258 symbols);
2259 if (err)
2260 return err;
2261 }
2262
2263 return 0;
2264}
2265
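/*
 * Remember the callchain cursor node that was just appended for LBR entry
 * 'idx', so that a later sample can reuse it when stitching LBR stacks.
 */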
2266static void save_lbr_cursor_node(struct thread *thread,
2267 struct callchain_cursor *cursor,
2268 int idx)
2269{
2270 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2271
2272 if (!lbr_stitch)
2273 return;
2274
2275 if (cursor->pos == cursor->nr) {
2276 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2277 return;
2278 }
2279
2280 if (!cursor->curr)
2281 cursor->curr = cursor->first;
2282 else
2283 cursor->curr = cursor->curr->next;
2284
2285 map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
2286 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2287 sizeof(struct callchain_cursor_node));
2288 lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
2289 lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
2290
2291 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2292 cursor->pos++;
2293}
2294
2295static int lbr_callchain_add_lbr_ip(struct thread *thread,
2296 struct callchain_cursor *cursor,
2297 struct perf_sample *sample,
2298 struct symbol **parent,
2299 struct addr_location *root_al,
2300 u64 *branch_from,
2301 bool callee,
2302 bool symbols)
2303{
2304 struct branch_stack *lbr_stack = sample->branch_stack;
2305 struct branch_entry *entries = perf_sample__branch_entries(sample);
2306 u8 cpumode = PERF_RECORD_MISC_USER;
2307 int lbr_nr = lbr_stack->nr;
2308 struct branch_flags *flags;
2309 int err, i;
2310 u64 ip;
2311
2312 /*
2313 * curr and pos are not used while the session is being written; they are
2314 * cleared in callchain_cursor_commit() when the writing session is closed.
2315 * Use curr and pos here to track the current cursor node.
2316 */
2317 if (thread__lbr_stitch(thread)) {
2318 cursor->curr = NULL;
2319 cursor->pos = cursor->nr;
2320 if (cursor->nr) {
2321 cursor->curr = cursor->first;
2322 for (i = 0; i < (int)(cursor->nr - 1); i++)
2323 cursor->curr = cursor->curr->next;
2324 }
2325 }
2326
2327 if (callee) {
2328 /* Add LBR ip from first entries.to */
2329 ip = entries[0].to;
2330 flags = &entries[0].flags;
2331 *branch_from = entries[0].from;
2332 err = add_callchain_ip(thread, cursor, parent,
2333 root_al, &cpumode, ip,
2334 true, flags, NULL,
2335 *branch_from, symbols);
2336 if (err)
2337 return err;
2338
2339 /*
2340 * The number of cursor nodes has increased, so advance the
2341 * current cursor node.
2342 * There is no need to save the cursor node for entry 0, since it is
2343 * impossible to stitch the whole LBR stack of the previous sample.
2344 */
2345 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2346 if (!cursor->curr)
2347 cursor->curr = cursor->first;
2348 else
2349 cursor->curr = cursor->curr->next;
2350 cursor->pos++;
2351 }
2352
2353 /* Add LBR ip from entries.from one by one. */
2354 for (i = 0; i < lbr_nr; i++) {
2355 ip = entries[i].from;
2356 flags = &entries[i].flags;
2357 err = add_callchain_ip(thread, cursor, parent,
2358 root_al, &cpumode, ip,
2359 true, flags, NULL,
2360 *branch_from, symbols);
2361 if (err)
2362 return err;
2363 save_lbr_cursor_node(thread, cursor, i);
2364 }
2365 return 0;
2366 }
2367
2368 /* Add LBR ip from entries.from one by one. */
2369 for (i = lbr_nr - 1; i >= 0; i--) {
2370 ip = entries[i].from;
2371 flags = &entries[i].flags;
2372 err = add_callchain_ip(thread, cursor, parent,
2373 root_al, &cpumode, ip,
2374 true, flags, NULL,
2375 *branch_from, symbols);
2376 if (err)
2377 return err;
2378 save_lbr_cursor_node(thread, cursor, i);
2379 }
2380
2381 if (lbr_nr > 0) {
2382 /* Add LBR ip from first entries.to */
2383 ip = entries[0].to;
2384 flags = &entries[0].flags;
2385 *branch_from = entries[0].from;
2386 err = add_callchain_ip(thread, cursor, parent,
2387 root_al, &cpumode, ip,
2388 true, flags, NULL,
2389 *branch_from, symbols);
2390 if (err)
2391 return err;
2392 }
2393
2394 return 0;
2395}
2396
2397static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2398 struct callchain_cursor *cursor)
2399{
2400 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2401 struct callchain_cursor_node *cnode;
2402 struct stitch_list *stitch_node;
2403 int err;
2404
2405 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2406 cnode = &stitch_node->cursor;
2407
2408 err = callchain_cursor_append(cursor, cnode->ip,
2409 &cnode->ms,
2410 cnode->branch,
2411 &cnode->branch_flags,
2412 cnode->nr_loop_iter,
2413 cnode->iter_cycles,
2414 cnode->branch_from,
2415 cnode->srcline);
2416 if (err)
2417 return err;
2418 }
2419 return 0;
2420}
2421
2422static struct stitch_list *get_stitch_node(struct thread *thread)
2423{
2424 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2425 struct stitch_list *stitch_node;
2426
2427 if (!list_empty(&lbr_stitch->free_lists)) {
2428 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2429 struct stitch_list, node);
2430 list_del(&stitch_node->node);
2431
2432 return stitch_node;
2433 }
2434
2435 return malloc(sizeof(struct stitch_list));
2436}
2437
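/*
 * Check whether the bottom of the current sample's LBR stack overlaps the
 * previous sample's; if so, queue the previous sample's non-overlapping
 * entries on lbr_stitch->lists so they can be stitched onto this callchain.
 */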
2438static bool has_stitched_lbr(struct thread *thread,
2439 struct perf_sample *cur,
2440 struct perf_sample *prev,
2441 unsigned int max_lbr,
2442 bool callee)
2443{
2444 struct branch_stack *cur_stack = cur->branch_stack;
2445 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2446 struct branch_stack *prev_stack = prev->branch_stack;
2447 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2448 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2449 int i, j, nr_identical_branches = 0;
2450 struct stitch_list *stitch_node;
2451 u64 cur_base, distance;
2452
2453 if (!cur_stack || !prev_stack)
2454 return false;
2455
2456 /* Find the physical index of the base-of-stack for current sample. */
2457 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2458
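 /*
  * Distance from that base to the previous sample's most recent entry,
  * computed with wraparound since hw_idx indexes a circular LBR buffer.
  */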
2459 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2460 (max_lbr + prev_stack->hw_idx - cur_base);
2461 /* Previous sample has shorter stack. Nothing can be stitched. */
2462 if (distance + 1 > prev_stack->nr)
2463 return false;
2464
2465 /*
2466 * Check if there are identical LBRs between two samples.
2467 * Identical LBRs must have same from, to and flags values. Also,
2468 * they have to be saved in the same LBR registers (same physical
2469 * index).
2470 *
2471 * Starts from the base-of-stack of current sample.
2472 */
2473 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2474 if ((prev_entries[i].from != cur_entries[j].from) ||
2475 (prev_entries[i].to != cur_entries[j].to) ||
2476 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2477 break;
2478 nr_identical_branches++;
2479 }
2480
2481 if (!nr_identical_branches)
2482 return false;
2483
2484 /*
2485 * Save the LBRs between the base-of-stack of previous sample
2486 * and the base-of-stack of current sample into lbr_stitch->lists.
2487 * These LBRs will be stitched later.
2488 */
2489 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2490
2491 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2492 continue;
2493
2494 stitch_node = get_stitch_node(thread);
2495 if (!stitch_node)
2496 return false;
2497
2498 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2499 sizeof(struct callchain_cursor_node));
2500
2501 stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
2502 stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
2503
2504 if (callee)
2505 list_add(&stitch_node->node, &lbr_stitch->lists);
2506 else
2507 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2508 }
2509
2510 return true;
2511}
2512
2513static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2514{
2515 if (thread__lbr_stitch(thread))
2516 return true;
2517
2518 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2519 if (!thread__lbr_stitch(thread))
2520 goto err;
2521
2522 thread__lbr_stitch(thread)->prev_lbr_cursor =
2523 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2524 if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2525 goto free_lbr_stitch;
2526
2527 thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
2528
2529 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2530 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2531
2532 return true;
2533
2534free_lbr_stitch:
2535 free(thread__lbr_stitch(thread));
2536 thread__set_lbr_stitch(thread, NULL);
2537err:
2538 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2539 thread__set_lbr_stitch_enable(thread, false);
2540 return false;
2541}
2542
2543/*
2544 * Resolve an LBR callstack chain sample.
2545 * Return:
2546 * 1 on success, LBR callchain information was obtained
2547 * 0 when no LBR callchain information is available, the caller should try fp
2548 * negative error code on other errors.
2549 */
2550static int resolve_lbr_callchain_sample(struct thread *thread,
2551 struct callchain_cursor *cursor,
2552 struct perf_sample *sample,
2553 struct symbol **parent,
2554 struct addr_location *root_al,
2555 int max_stack,
2556 unsigned int max_lbr,
2557 bool symbols)
2558{
2559 bool callee = (callchain_param.order == ORDER_CALLEE);
2560 struct ip_callchain *chain = sample->callchain;
2561 int chain_nr = min(max_stack, (int)chain->nr), i;
2562 struct lbr_stitch *lbr_stitch;
2563 bool stitched_lbr = false;
2564 u64 branch_from = 0;
2565 int err;
2566
2567 for (i = 0; i < chain_nr; i++) {
2568 if (chain->ips[i] == PERF_CONTEXT_USER)
2569 break;
2570 }
2571
2572 /* LBR only affects the user callchain */
2573 if (i == chain_nr)
2574 return 0;
2575
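 /*
  * When stitching is enabled and the sample carries a hardware LBR index,
  * check whether the previous sample's LBRs can be stitched onto this one;
  * if not, recycle the previously saved cursor nodes onto the free list.
  */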
2576 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2577 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2578 lbr_stitch = thread__lbr_stitch(thread);
2579
2580 stitched_lbr = has_stitched_lbr(thread, sample,
2581 &lbr_stitch->prev_sample,
2582 max_lbr, callee);
2583
2584 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2585 struct stitch_list *stitch_node;
2586
2587 list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
2588 map_symbol__exit(&stitch_node->cursor.ms);
2589
2590 list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
2591 }
2592 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2593 }
2594
2595 if (callee) {
2596 /* Add kernel ip */
2597 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2598 parent, root_al, branch_from,
2599 true, i, symbols);
2600 if (err)
2601 goto error;
2602
2603 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2604 root_al, &branch_from, true, symbols);
2605 if (err)
2606 goto error;
2607
2608 if (stitched_lbr) {
2609 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2610 if (err)
2611 goto error;
2612 }
2613
2614 } else {
2615 if (stitched_lbr) {
2616 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2617 if (err)
2618 goto error;
2619 }
2620 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2621 root_al, &branch_from, false, symbols);
2622 if (err)
2623 goto error;
2624
2625 /* Add kernel ip */
2626 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2627 parent, root_al, branch_from,
2628 false, i, symbols);
2629 if (err)
2630 goto error;
2631 }
2632 return 1;
2633
2634error:
2635 return (err < 0) ? err : 0;
2636}
2637
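/*
 * Walk backwards from 'ent' to the closest preceding PERF_CONTEXT_* marker
 * and feed it to add_callchain_ip(), which updates *cpumode without
 * appending a real callchain entry.
 */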
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent, bool symbols)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0, symbols);
			break;
		}
	}
	return err;
}

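/*
 * Leaf-frame-caller recovery is only implemented for arm64; on other
 * architectures there is no extra frame to recover, so return 0.
 */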
static u64 get_leaf_frame_caller(struct perf_sample *sample,
				 struct thread *thread, int usr_idx)
{
	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
	else
		return 0;
}

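/*
 * Resolve the callchain recorded in a sample: try the LBR callstack first
 * (if the event has branch callstacks), optionally mix in the sampled branch
 * stack (callchain_param.branch_callstack), then walk the regular
 * kernel-provided callchain, tracking PERF_CONTEXT_* markers to keep the
 * cpumode in sync.
 */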
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack,
					    bool symbols)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries, usr_idx;
	int skip_idx = -1;
	int first_call = 0;
	u64 leaf_frame_caller;

	if (chain)
		chain_nr = chain->nr;

	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches,
						   symbols);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this,
				 * assume the calling instruction is not
				 * longer than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from, symbols);

			if (!err) {
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0, symbols);
			}
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call, symbols);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j, symbols);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		/*
		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
		 * the index will be different in order to add the missing frame
		 * at the right place.
		 */

		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;

		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {

			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);

			/*
			 * Check that leaf_frame_caller != ip so the same
			 * value is not added twice.
			 */
			if (leaf_frame_caller && leaf_frame_caller != ip) {

				err = add_callchain_ip(thread, cursor, parent,
						       root_al, &cpumode, leaf_frame_caller,
						       false, NULL, NULL, 0, symbols);
				if (err)
					return (err < 0) ? err : 0;
			}
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0, symbols);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

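/*
 * Expand the symbol at @ip into its chain of inlined callers, appending one
 * cursor entry per inline frame. Inline information is looked up in (and
 * cached back into) the DSO's inlined-nodes tree. Returns 0 when inline
 * frames were appended, non-zero when the caller should append the plain
 * symbol itself.
 */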
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	struct dso *dso;
	u64 addr;
	int ret = 1;
	struct map_symbol ilist_ms;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__dso_map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);
	dso = map__dso(map);

	inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
	}

	ilist_ms = (struct map_symbol) {
		.maps = maps__get(ms->maps),
		.map = map__get(map),
	};
	list_for_each_entry(ilist, &inline_node->val, list) {
		ilist_ms.sym = ilist->symbol;
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}
	map_symbol__exit(&ilist_ms);

	return ret;
}

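/*
 * Callback invoked for every frame produced by the DWARF unwinder: skip
 * unresolved entries if requested, prefer inline expansion, otherwise append
 * the frame with its srcline to the callchain cursor.
 */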
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__dso_map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack, bool symbols)
{
	/* Can we do DWARF post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	if (!symbols)
		pr_debug("Skipping symbol resolution during unwinding is not currently supported\n");

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack, false);
}

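/*
 * Resolve a complete callchain for a sample. The sample-provided chain and
 * the DWARF post-unwind chain are combined in an order that matches
 * callchain_param.order, so the resulting cursor reads callee-first or
 * caller-first as requested.
 */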
int __thread__resolve_callchain(struct thread *thread,
				struct callchain_cursor *cursor,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct symbol **parent,
				struct addr_location *root_al,
				int max_stack,
				bool symbols)
{
	int ret = 0;

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
	}

	return ret;
}

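/*
 * Iterate over every thread known to @machine and invoke @fn; iteration
 * stops at the first non-zero return value, which is then propagated.
 *
 * Minimal usage sketch (the callback below is hypothetical, not part of
 * this file):
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */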
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	return threads__for_each_thread(&machine->threads, fn, priv);
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

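/*
 * Helpers for snapshotting the thread table: machine__thread_list() collects
 * a reference-counted struct thread_list entry per thread onto @list, and
 * thread_list__delete() drops the references and frees the entries.
 */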
static int thread_list_cb(struct thread *thread, void *data)
{
	struct list_head *list = data;
	struct thread_list *entry = malloc(sizeof(*entry));

	if (!entry)
		return -ENOMEM;

	entry->thread = thread__get(thread);
	list_add_tail(&entry->list, list);
	return 0;
}

int machine__thread_list(struct machine *machine, struct list_head *list)
{
	return machine__for_each_thread(machine, thread_list_cb, list);
}

void thread_list__delete(struct list_head *list)
{
	struct thread_list *pos, *next;

	list_for_each_entry_safe(pos, next, list, list) {
		thread__zput(pos->thread);
		list_del(&pos->list);
		free(pos);
	}
}

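/*
 * Return the tid last recorded as running on @cpu, or -1 if the CPU is out
 * of range or no tid has been recorded for it yet.
 */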
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
		return -1;

	return machine->current_tid[cpu];
}

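/*
 * Record that @tid (belonging to @pid) is currently running on @cpu, growing
 * the per-CPU table on demand, and make sure a matching thread exists with
 * its cpu field set.
 */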
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	const pid_t init_val = -1;

	if (cpu < 0)
		return -EINVAL;

	if (realloc_array_as_needed(machine->current_tid,
				    machine->current_tid_sz,
				    (unsigned int)cpu,
				    &init_val))
		return -ENOMEM;

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread__set_cpu(thread, cpu);
	thread__put(thread);

	return 0;
}

/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() or
 * machine__normalized_is() if a normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

bool machine__normalized_is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map__start(map);
	}
	return err;
}

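/*
 * For machines with a single address space (no separate kernel and user
 * mappings), refine @cpumode using the address itself: an address in the
 * kernel range is reported as kernel (or guest kernel), everything else as
 * user (or guest user).
 */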
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}

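/*
 * Look up @filename in the machine's DSO collection, creating a new entry if
 * it is not there yet; the _id variant also matches on the optional dso_id.
 */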
struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
				    const struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}

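/*
 * Kernel address resolver with a void-pointer machine argument so it can be
 * used as a generic callback: rewrites *addrp to the start address of the
 * containing symbol, reports the module short name via *modp (NULL for the
 * core kernel) and returns the symbol name, or NULL if nothing matches.
 */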
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
	*addrp = map__unmap_ip(map, sym->start);
	return sym->name;
}

struct machine__for_each_dso_cb_args {
	struct machine *machine;
	machine__dso_t fn;
	void *priv;
};

static int machine__for_each_dso_cb(struct dso *dso, void *data)
{
	struct machine__for_each_dso_cb_args *args = data;

	return args->fn(dso, args->machine, args->priv);
}

int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
	struct machine__for_each_dso_cb_args args = {
		.machine = machine,
		.fn = fn,
		.priv = priv,
	};

	return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
}

int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
	struct maps *maps = machine__kernel_maps(machine);

	return maps__for_each_map(maps, fn, priv);
}

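/*
 * Decide whether @addr falls inside a locking-related kernel text range. The
 * boundaries of the sched, lock, traceiter and trace_contention text regions
 * are resolved lazily from kernel symbols on the first call and cached;
 * sched.text_start == 1 is used as a sentinel meaning the lookup failed.
 */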
bool machine__is_lock_function(struct machine *machine, u64 addr)
{
	if (!machine->sched.text_start) {
		struct map *kmap;
		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);

		if (!sym) {
			/* to avoid retry */
			machine->sched.text_start = 1;
			return false;
		}

		machine->sched.text_start = map__unmap_ip(kmap, sym->start);

		/* should not fail from here */
		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
		machine->sched.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
		machine->lock.text_start = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
		machine->lock.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
		if (sym) {
			machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
			machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
		}
		sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
		if (sym) {
			machine->trace.text_start = map__unmap_ip(kmap, sym->start);
			machine->trace.text_end = map__unmap_ip(kmap, sym->end);
		}
	}

	/* failed to get kernel symbols */
	if (machine->sched.text_start == 1)
		return false;

	/* mutex and rwsem functions are in sched text section */
	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
		return true;

	/* spinlock functions are in lock text section */
	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
		return true;

	/*
	 * traceiter functions currently don't have their own section
	 * but we consider them lock functions
	 */
	if (machine->traceiter.text_start != 0) {
		if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
			return true;
	}

	if (machine->trace.text_start != 0) {
		if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
			return true;
	}

	return false;
}

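/*
 * Mark every DSO in this machine as hit (delegates to dsos__hit_all());
 * callers typically use this when data for all DSOs, not only those seen in
 * samples, should be retained.
 */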
int machine__hit_all_dsos(struct machine *machine)
{
	return dsos__hit_all(&machine->dsos);
}