// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

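/* Initialize an empty DSO container: list + rbtree protected by a rwlock. */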
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

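/*
 * Build the name used to recognize this machine's kernel MMAP events:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms...]" for guests.
 */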
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

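/* Drop every DSO from the container, releasing the container's references. */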
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead. At this point whatever threads were
		 * left in the dead lists had better have a reference count
		 * taken by whoever is using them, and then, when they drop
		 * those references and it finally hits zero, thread__put()
		 * will check and see that it's not in the dead threads list
		 * and will not try to remove it from there, just calling
		 * thread__delete() straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

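/* Create a guest machine for 'pid' and insert it into the pid-sorted rbtree. */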
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

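/*
 * Fill in a thread's pid once it becomes known and, if the thread is not the
 * group leader, share the leader's map groups with it.
 */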
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		goto out_put;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize map_groups separately after the rb
		 * tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader and that
		 * would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
	if (!map) {
		map = dso__new_map(event->ksymbol.name);
		if (!map)
			return -ENOMEM;

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		map_groups__insert(&machine->kmaps, map);
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
	if (map)
		map_groups__remove(&machine->kmaps, map);

	return 0;
}

int machine__process_ksymbol(struct machine *machine,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

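/*
 * If a module DSO still carries a placeholder "[...]" long name, replace it
 * with the real file path so that dso__load() can find the module on disk.
 */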
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

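/* Find the map for a kernel module, creating the map and its DSO if needed. */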
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	for (map = maps__first(maps); map; map = map__next(map)) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = map_groups__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	map_groups__insert(&machine->kmaps, map);

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	map_groups__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		map_groups__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

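/* Parse the release string out of the "Linux version ..." line in /proc/version. */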
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

1306 * Full name could reveal us kmod compression, so
1307 * we need to update the symtab_type if needed.
1308 */
1309 if (m->comp && is_kmod_dso(map->dso)) {
1310 map->dso->symtab_type++;
1311 map->dso->comp = m->comp;
1312 }
1313
1314 return 0;
1315}
1316
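/*
 * Recursively walk a modules directory (e.g. /lib/modules/<version>) and
 * record the on-disk path of every module file found for the matching maps.
 */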
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

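/* Set the address range covered by the kernel map. */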
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end = end;
	/*
	 * Be a bit paranoid here, some perf.data files came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	map_groups__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	map_groups__insert(&machine->kmaps, map);
	map__put(map);
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * we have a real start address now, so re-order the kmaps
		 * assume it's the last in the kmaps
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct map *kernel_map = machine__kernel_map(machine);
	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}

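/*
 * Handle MMAP records for kernel space: module mappings get module maps,
 * while a mapping for the kernel image itself (re)creates the kernel map.
 */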
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;


			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					    event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, prot, 0,
		       event->mmap.filename,
		       thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

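/*
 * Unlink a thread from the machine: invalidate the lookup cache, take it out
 * of the rbtree and park it on the dead list until its refcount drops to zero.
 */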
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events
	 * that get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

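/*
 * Resolve one callchain entry and append it to the cursor. Addresses at or
 * above PERF_CONTEXT_MAX are context markers that switch the current cpumode
 * instead of contributing an entry themselves.
 */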
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, ip, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

2140/* Remove loops. */
2141static int remove_loops(struct branch_entry *l, int nr,
2142 struct iterations *iter)
2143{
2144 int i, j, off;
2145 unsigned char chash[CHASHSZ];
2146
2147 memset(chash, NO_ENTRY, sizeof(chash));
2148
2149 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2150
2151 for (i = 0; i < nr; i++) {
2152 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2153
2154 /* no collision handling for now */
2155 if (chash[h] == NO_ENTRY) {
2156 chash[h] = i;
2157 } else if (l[chash[h]].from == l[i].from) {
2158 bool is_loop = true;
2159 /* check if it is a real loop */
2160 off = 0;
2161 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2162 if (l[j].from != l[i + off].from) {
2163 is_loop = false;
2164 break;
2165 }
2166 if (is_loop) {
2167 j = nr - (i + off);
2168 if (j > 0) {
2169 save_iterations(iter + i + off,
2170 l + i, off);
2171
2172 memmove(iter + i, iter + i + off,
2173 j * sizeof(*iter));
2174
2175 memmove(l + i, l + i + off,
2176 j * sizeof(*l));
2177 }
2178
2179 nr -= off;
2180 }
2181 }
2182 }
2183 return nr;
2184}
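
/*
 * Illustrative sketch (not part of the original file): how remove_loops()
 * folds a repeated branch sequence. The addresses are made up and assumed
 * not to collide in chash.
 */
static void __maybe_unused remove_loops_example(void)
{
	struct branch_entry be[5] = {
		{ .from = 0xA0, .to = 0xB0 },
		{ .from = 0xB0, .to = 0xA0 },
		{ .from = 0xA0, .to = 0xB0 },	/* second iteration of the A/B loop */
		{ .from = 0xB0, .to = 0xC0 },	/* loop exit */
		{ .from = 0xC0, .to = 0xD0 },
	};
	struct iterations iter[5];
	int nr;

	memset(iter, 0, sizeof(iter));
	nr = remove_loops(be, 5, iter);
	/*
	 * nr is now 3: A->B, B->A and C->D remain, and iter[2] records one
	 * folded loop iteration (nr_loop_iter == 1).
	 */
	(void)nr;
}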
2185
2186/*
2187 * Resolve LBR callstack chain sample
2188 * Return:
2189 * 1 on success: LBR callchain information was obtained
2190 * 0 when no LBR callchain information is available; the caller should try fp
2191 * negative error code on other errors.
2192 */
2193static int resolve_lbr_callchain_sample(struct thread *thread,
2194 struct callchain_cursor *cursor,
2195 struct perf_sample *sample,
2196 struct symbol **parent,
2197 struct addr_location *root_al,
2198 int max_stack)
2199{
2200 struct ip_callchain *chain = sample->callchain;
2201 int chain_nr = min(max_stack, (int)chain->nr), i;
2202 u8 cpumode = PERF_RECORD_MISC_USER;
2203 u64 ip, branch_from = 0;
2204
2205 for (i = 0; i < chain_nr; i++) {
2206 if (chain->ips[i] == PERF_CONTEXT_USER)
2207 break;
2208 }
2209
2210 /* LBR only affects the user callchain */
2211 if (i != chain_nr) {
2212 struct branch_stack *lbr_stack = sample->branch_stack;
2213 int lbr_nr = lbr_stack->nr, j, k;
2214 bool branch;
2215 struct branch_flags *flags;
2216 /*
2217		 * LBR callstack can only capture the user call chain.
2218		 * mix_chain_nr is the kernel call chain entry count plus
2219		 * the LBR user call chain entry count:
2220		 * i is the number of kernel call chain entries,
2221		 * 1 is for the PERF_CONTEXT_USER marker, and
2222		 * lbr_nr + 1 is the number of user call chain entries.
2223		 * For details, please refer to the comments
2224		 * in callchain__printf.
2225 */
2226 int mix_chain_nr = i + 1 + lbr_nr + 1;
2227
2228 for (j = 0; j < mix_chain_nr; j++) {
2229 int err;
2230 branch = false;
2231 flags = NULL;
2232
2233 if (callchain_param.order == ORDER_CALLEE) {
2234 if (j < i + 1)
2235 ip = chain->ips[j];
2236 else if (j > i + 1) {
2237 k = j - i - 2;
2238 ip = lbr_stack->entries[k].from;
2239 branch = true;
2240 flags = &lbr_stack->entries[k].flags;
2241 } else {
2242 ip = lbr_stack->entries[0].to;
2243 branch = true;
2244 flags = &lbr_stack->entries[0].flags;
2245 branch_from =
2246 lbr_stack->entries[0].from;
2247 }
2248 } else {
2249 if (j < lbr_nr) {
2250 k = lbr_nr - j - 1;
2251 ip = lbr_stack->entries[k].from;
2252 branch = true;
2253 flags = &lbr_stack->entries[k].flags;
2254 }
2255 else if (j > lbr_nr)
2256 ip = chain->ips[i + 1 - (j - lbr_nr)];
2257 else {
2258 ip = lbr_stack->entries[0].to;
2259 branch = true;
2260 flags = &lbr_stack->entries[0].flags;
2261 branch_from =
2262 lbr_stack->entries[0].from;
2263 }
2264 }
2265
2266 err = add_callchain_ip(thread, cursor, parent,
2267 root_al, &cpumode, ip,
2268 branch, flags, NULL,
2269 branch_from);
2270 if (err)
2271 return (err < 0) ? err : 0;
2272 }
2273 return 1;
2274 }
2275
2276 return 0;
2277}
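
/*
 * Worked example for the ORDER_CALLEE merge above (illustrative): with
 * i = 2 kernel entries, the PERF_CONTEXT_USER marker, and lbr_nr = 3 LBR
 * entries, mix_chain_nr = 2 + 1 + 3 + 1 = 7 and the emitted order is:
 *
 *   j = 0..2 : chain->ips[0..2]              (kernel chain + user marker)
 *   j = 3    : lbr_stack->entries[0].to      (current user ip)
 *   j = 4..6 : lbr_stack->entries[0..2].from (callers, innermost first)
 */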
2278
2279static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2280 struct callchain_cursor *cursor,
2281 struct symbol **parent,
2282 struct addr_location *root_al,
2283 u8 *cpumode, int ent)
2284{
2285 int err = 0;
2286
2287 while (--ent >= 0) {
2288 u64 ip = chain->ips[ent];
2289
2290 if (ip >= PERF_CONTEXT_MAX) {
2291 err = add_callchain_ip(thread, cursor, parent,
2292 root_al, cpumode, ip,
2293 false, NULL, NULL, 0);
2294 break;
2295 }
2296 }
2297 return err;
2298}
2299
2300static int thread__resolve_callchain_sample(struct thread *thread,
2301 struct callchain_cursor *cursor,
2302 struct evsel *evsel,
2303 struct perf_sample *sample,
2304 struct symbol **parent,
2305 struct addr_location *root_al,
2306 int max_stack)
2307{
2308 struct branch_stack *branch = sample->branch_stack;
2309 struct ip_callchain *chain = sample->callchain;
2310 int chain_nr = 0;
2311 u8 cpumode = PERF_RECORD_MISC_USER;
2312 int i, j, err, nr_entries;
2313 int skip_idx = -1;
2314 int first_call = 0;
2315
2316 if (chain)
2317 chain_nr = chain->nr;
2318
2319 if (perf_evsel__has_branch_callstack(evsel)) {
2320 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2321 root_al, max_stack);
2322 if (err)
2323 return (err < 0) ? err : 0;
2324 }
2325
2326 /*
2327 * Based on DWARF debug information, some architectures skip
2328 * a callchain entry saved by the kernel.
2329 */
2330 skip_idx = arch_skip_callchain_idx(thread, chain);
2331
2332 /*
2333 * Add branches to call stack for easier browsing. This gives
2334 * more context for a sample than just the callers.
2335 *
2336 * This uses individual histograms of paths compared to the
2337 * aggregated histograms the normal LBR mode uses.
2338 *
2339 * Limitations for now:
2340 * - No extra filters
2341 * - No annotations (should annotate somehow)
2342 */
2343
2344 if (branch && callchain_param.branch_callstack) {
2345 int nr = min(max_stack, (int)branch->nr);
2346 struct branch_entry be[nr];
2347 struct iterations iter[nr];
2348
2349 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2350 pr_warning("corrupted branch chain. skipping...\n");
2351 goto check_calls;
2352 }
2353
2354 for (i = 0; i < nr; i++) {
2355 if (callchain_param.order == ORDER_CALLEE) {
2356 be[i] = branch->entries[i];
2357
2358 if (chain == NULL)
2359 continue;
2360
2361 /*
2362 * Check for overlap into the callchain.
2363 * The return address is one off compared to
2364 * the branch entry. To adjust for this
2365 * assume the calling instruction is not longer
2366 * than 8 bytes.
2367 */
2368 if (i == skip_idx ||
2369 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2370 first_call++;
2371 else if (be[i].from < chain->ips[first_call] &&
2372 be[i].from >= chain->ips[first_call] - 8)
2373 first_call++;
2374 } else
2375 be[i] = branch->entries[branch->nr - i - 1];
2376 }
2377
2378 memset(iter, 0, sizeof(struct iterations) * nr);
2379 nr = remove_loops(be, nr, iter);
2380
2381 for (i = 0; i < nr; i++) {
2382 err = add_callchain_ip(thread, cursor, parent,
2383 root_al,
2384 NULL, be[i].to,
2385 true, &be[i].flags,
2386 NULL, be[i].from);
2387
2388 if (!err)
2389 err = add_callchain_ip(thread, cursor, parent, root_al,
2390 NULL, be[i].from,
2391 true, &be[i].flags,
2392 &iter[i], 0);
2393 if (err == -EINVAL)
2394 break;
2395 if (err)
2396 return err;
2397 }
2398
2399 if (chain_nr == 0)
2400 return 0;
2401
2402 chain_nr -= nr;
2403 }
2404
2405check_calls:
2406	if (chain && callchain_param.order != ORDER_CALLEE) {
2407 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2408 &cpumode, chain->nr - first_call);
2409 if (err)
2410 return (err < 0) ? err : 0;
2411 }
2412 for (i = first_call, nr_entries = 0;
2413 i < chain_nr && nr_entries < max_stack; i++) {
2414 u64 ip;
2415
2416 if (callchain_param.order == ORDER_CALLEE)
2417 j = i;
2418 else
2419 j = chain->nr - i - 1;
2420
2421#ifdef HAVE_SKIP_CALLCHAIN_IDX
2422 if (j == skip_idx)
2423 continue;
2424#endif
2425 ip = chain->ips[j];
2426 if (ip < PERF_CONTEXT_MAX)
2427 ++nr_entries;
2428 else if (callchain_param.order != ORDER_CALLEE) {
2429 err = find_prev_cpumode(chain, thread, cursor, parent,
2430 root_al, &cpumode, j);
2431 if (err)
2432 return (err < 0) ? err : 0;
2433 continue;
2434 }
2435
2436 err = add_callchain_ip(thread, cursor, parent,
2437 root_al, &cpumode, ip,
2438 false, NULL, NULL, 0);
2439
2440 if (err)
2441 return (err < 0) ? err : 0;
2442 }
2443
2444 return 0;
2445}
2446
2447static int append_inlines(struct callchain_cursor *cursor,
2448 struct map *map, struct symbol *sym, u64 ip)
2449{
2450 struct inline_node *inline_node;
2451 struct inline_list *ilist;
2452 u64 addr;
2453 int ret = 1;
2454
2455 if (!symbol_conf.inline_name || !map || !sym)
2456 return ret;
2457
2458 addr = map__map_ip(map, ip);
2459 addr = map__rip_2objdump(map, addr);
2460
2461 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2462 if (!inline_node) {
2463 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2464 if (!inline_node)
2465 return ret;
2466 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2467 }
2468
2469 list_for_each_entry(ilist, &inline_node->val, list) {
2470 ret = callchain_cursor_append(cursor, ip, map,
2471 ilist->symbol, false,
2472 NULL, 0, 0, 0, ilist->srcline);
2473
2474 if (ret != 0)
2475 return ret;
2476 }
2477
2478 return ret;
2479}
2480
2481static int unwind_entry(struct unwind_entry *entry, void *arg)
2482{
2483 struct callchain_cursor *cursor = arg;
2484 const char *srcline = NULL;
2485 u64 addr = entry->ip;
2486
2487 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2488 return 0;
2489
2490 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2491 return 0;
2492
2493 /*
2494 * Convert entry->ip from a virtual address to an offset in
2495 * its corresponding binary.
2496 */
2497 if (entry->map)
2498 addr = map__map_ip(entry->map, entry->ip);
2499
2500 srcline = callchain_srcline(entry->map, entry->sym, addr);
2501 return callchain_cursor_append(cursor, entry->ip,
2502 entry->map, entry->sym,
2503 false, NULL, 0, 0, 0, srcline);
2504}
2505
2506static int thread__resolve_callchain_unwind(struct thread *thread,
2507 struct callchain_cursor *cursor,
2508 struct evsel *evsel,
2509 struct perf_sample *sample,
2510 int max_stack)
2511{
2512 /* Can we do dwarf post unwind? */
2513 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2514 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2515 return 0;
2516
2517 /* Bail out if nothing was captured. */
2518 if ((!sample->user_regs.regs) ||
2519 (!sample->user_stack.size))
2520 return 0;
2521
2522 return unwind__get_entries(unwind_entry, cursor,
2523 thread, sample, max_stack);
2524}
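
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * record-side attr configuration that satisfies the checks in
 * thread__resolve_callchain_unwind(). The register mask and stack size
 * below are placeholders; real callers use an arch-specific register mask
 * and a user-configured stack dump size.
 */
static void __maybe_unused example_enable_dwarf_unwind(struct perf_event_attr *attr)
{
	attr->sample_type |= PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
	attr->sample_regs_user = ~0ULL;	/* placeholder: arch register mask */
	attr->sample_stack_user = 8192;	/* bytes of user stack to snapshot */
}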
2525
2526int thread__resolve_callchain(struct thread *thread,
2527 struct callchain_cursor *cursor,
2528 struct evsel *evsel,
2529 struct perf_sample *sample,
2530 struct symbol **parent,
2531 struct addr_location *root_al,
2532 int max_stack)
2533{
2534 int ret = 0;
2535
2536 callchain_cursor_reset(cursor);
2537
2538 if (callchain_param.order == ORDER_CALLEE) {
2539 ret = thread__resolve_callchain_sample(thread, cursor,
2540 evsel, sample,
2541 parent, root_al,
2542 max_stack);
2543 if (ret)
2544 return ret;
2545 ret = thread__resolve_callchain_unwind(thread, cursor,
2546 evsel, sample,
2547 max_stack);
2548 } else {
2549 ret = thread__resolve_callchain_unwind(thread, cursor,
2550 evsel, sample,
2551 max_stack);
2552 if (ret)
2553 return ret;
2554 ret = thread__resolve_callchain_sample(thread, cursor,
2555 evsel, sample,
2556 parent, root_al,
2557 max_stack);
2558 }
2559
2560 return ret;
2561}
2562
2563int machine__for_each_thread(struct machine *machine,
2564 int (*fn)(struct thread *thread, void *p),
2565 void *priv)
2566{
2567 struct threads *threads;
2568 struct rb_node *nd;
2569 struct thread *thread;
2570 int rc = 0;
2571 int i;
2572
2573 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2574 threads = &machine->threads[i];
2575 for (nd = rb_first_cached(&threads->entries); nd;
2576 nd = rb_next(nd)) {
2577 thread = rb_entry(nd, struct thread, rb_node);
2578 rc = fn(thread, priv);
2579 if (rc != 0)
2580 return rc;
2581 }
2582
2583 list_for_each_entry(thread, &threads->dead, node) {
2584 rc = fn(thread, priv);
2585 if (rc != 0)
2586 return rc;
2587 }
2588 }
2589 return rc;
2590}
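
/*
 * Illustrative usage sketch (not part of the original file): counting a
 * machine's threads with the iterator above, as in
 * machine__for_each_thread(machine, example_count_thread_cb, &count).
 * A non-zero return from the callback aborts the walk, exactly as the rc
 * checks above implement.
 */
struct example_thread_count {
	int nr;
};

static int __maybe_unused example_count_thread_cb(struct thread *thread __maybe_unused,
						  void *priv)
{
	struct example_thread_count *count = priv;

	count->nr++;
	return 0;
}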
2591
2592int machines__for_each_thread(struct machines *machines,
2593 int (*fn)(struct thread *thread, void *p),
2594 void *priv)
2595{
2596 struct rb_node *nd;
2597 int rc = 0;
2598
2599 rc = machine__for_each_thread(&machines->host, fn, priv);
2600 if (rc != 0)
2601 return rc;
2602
2603 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
2604 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2605
2606 rc = machine__for_each_thread(machine, fn, priv);
2607 if (rc != 0)
2608 return rc;
2609 }
2610 return rc;
2611}
2612
2613pid_t machine__get_current_tid(struct machine *machine, int cpu)
2614{
2615 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2616
2617 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
2618 return -1;
2619
2620 return machine->current_tid[cpu];
2621}
2622
2623int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2624 pid_t tid)
2625{
2626 struct thread *thread;
2627 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2628
2629 if (cpu < 0)
2630 return -EINVAL;
2631
2632 if (!machine->current_tid) {
2633 int i;
2634
2635 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
2636 if (!machine->current_tid)
2637 return -ENOMEM;
2638 for (i = 0; i < nr_cpus; i++)
2639 machine->current_tid[i] = -1;
2640 }
2641
2642 if (cpu >= nr_cpus) {
2643 pr_err("Requested CPU %d too large. ", cpu);
2644 pr_err("Consider raising MAX_NR_CPUS\n");
2645 return -EINVAL;
2646 }
2647
2648 machine->current_tid[cpu] = tid;
2649
2650 thread = machine__findnew_thread(machine, pid, tid);
2651 if (!thread)
2652 return -ENOMEM;
2653
2654 thread->cpu = cpu;
2655 thread__put(thread);
2656
2657 return 0;
2658}
2659
2660/*
2661 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2662 * normalized arch is needed.
2663 */
2664bool machine__is(struct machine *machine, const char *arch)
2665{
2666 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2667}
2668
2669int machine__nr_cpus_avail(struct machine *machine)
2670{
2671 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2672}
2673
2674int machine__get_kernel_start(struct machine *machine)
2675{
2676 struct map *map = machine__kernel_map(machine);
2677 int err = 0;
2678
2679 /*
2680 * The only addresses above 2^63 are kernel addresses of a 64-bit
2681 * kernel. Note that addresses are unsigned so that on a 32-bit system
2682 * all addresses including kernel addresses are less than 2^32. In
2683 * that case (32-bit system), if the kernel mapping is unknown, all
2684 * addresses will be assumed to be in user space - see
2685 * machine__kernel_ip().
2686 */
2687 machine->kernel_start = 1ULL << 63;
2688 if (map) {
2689 err = map__load(map);
2690 /*
2691 * On x86_64, PTI entry trampolines are less than the
2692 * start of kernel text, but still above 2^63. So leave
2693 * kernel_start = 1ULL << 63 for x86_64.
2694 */
2695 if (!err && !machine__is(machine, "x86_64"))
2696 machine->kernel_start = map->start;
2697 }
2698 return err;
2699}
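
/*
 * Worked example (derived from the comment above): with kernel_start left
 * at 1ULL << 63 == 0x8000000000000000, machine__kernel_ip() classifies
 * 0xffffffff81000000 as kernel and 0x00007f1234567000 as user. On x86_64 a
 * PTI entry trampoline at e.g. 0xfffffe0000006000 lies below the start of
 * kernel text but still above 2^63, which is why kernel_start is not
 * lowered to map->start there.
 */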
2700
2701u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
2702{
2703 u8 addr_cpumode = cpumode;
2704 bool kernel_ip;
2705
2706 if (!machine->single_address_space)
2707 goto out;
2708
2709 kernel_ip = machine__kernel_ip(machine, addr);
2710 switch (cpumode) {
2711 case PERF_RECORD_MISC_KERNEL:
2712 case PERF_RECORD_MISC_USER:
2713 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
2714 PERF_RECORD_MISC_USER;
2715 break;
2716 case PERF_RECORD_MISC_GUEST_KERNEL:
2717 case PERF_RECORD_MISC_GUEST_USER:
2718 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
2719 PERF_RECORD_MISC_GUEST_USER;
2720 break;
2721 default:
2722 break;
2723 }
2724out:
2725 return addr_cpumode;
2726}
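
/*
 * Example (illustrative): with machine->single_address_space set, a sample
 * whose header said PERF_RECORD_MISC_USER but whose addr has the kernel bit
 * set is re-tagged PERF_RECORD_MISC_KERNEL here; the guest variants are
 * adjusted the same way, and any other cpumode passes through unchanged.
 */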
2727
2728struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2729{
2730 return dsos__findnew(&machine->dsos, filename);
2731}
2732
2733char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2734{
2735 struct machine *machine = vmachine;
2736 struct map *map;
2737 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
2738
2739 if (sym == NULL)
2740 return NULL;
2741
2742 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2743 *addrp = map->unmap_ip(map, sym->start);
2744 return sym->name;
2745}
1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <inttypes.h>
5#include <regex.h>
6#include <stdlib.h>
7#include "callchain.h"
8#include "debug.h"
9#include "dso.h"
10#include "env.h"
11#include "event.h"
12#include "evsel.h"
13#include "hist.h"
14#include "machine.h"
15#include "map.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "mem-events.h"
19#include "mem-info.h"
20#include "path.h"
21#include "srcline.h"
22#include "symbol.h"
23#include "sort.h"
24#include "strlist.h"
25#include "target.h"
26#include "thread.h"
27#include "util.h"
28#include "vdso.h"
29#include <stdbool.h>
30#include <sys/types.h>
31#include <sys/stat.h>
32#include <unistd.h>
33#include "unwind.h"
34#include "linux/hash.h"
35#include "asm/bug.h"
36#include "bpf-event.h"
37#include <internal/lib.h> // page_size
38#include "cgroup.h"
39#include "arm64-frame-pointer-unwind-support.h"
40
41#include <linux/ctype.h>
42#include <symbol/kallsyms.h>
43#include <linux/mman.h>
44#include <linux/string.h>
45#include <linux/zalloc.h>
46
47static struct dso *machine__kernel_dso(struct machine *machine)
48{
49 return map__dso(machine->vmlinux_map);
50}
51
52static int machine__set_mmap_name(struct machine *machine)
53{
54 if (machine__is_host(machine))
55 machine->mmap_name = strdup("[kernel.kallsyms]");
56 else if (machine__is_default_guest(machine))
57 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
58 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
59 machine->pid) < 0)
60 machine->mmap_name = NULL;
61
62 return machine->mmap_name ? 0 : -ENOMEM;
63}
64
65static void thread__set_guest_comm(struct thread *thread, pid_t pid)
66{
67 char comm[64];
68
69 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
70 thread__set_comm(thread, comm, 0);
71}
72
73int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
74{
75 int err = -ENOMEM;
76
77 memset(machine, 0, sizeof(*machine));
78 machine->kmaps = maps__new(machine);
79 if (machine->kmaps == NULL)
80 return -ENOMEM;
81
82 RB_CLEAR_NODE(&machine->rb_node);
83 dsos__init(&machine->dsos);
84
85 threads__init(&machine->threads);
86
87 machine->vdso_info = NULL;
88 machine->env = NULL;
89
90 machine->pid = pid;
91
92 machine->id_hdr_size = 0;
93 machine->kptr_restrict_warned = false;
94 machine->comm_exec = false;
95 machine->kernel_start = 0;
96 machine->vmlinux_map = NULL;
97
98 machine->root_dir = strdup(root_dir);
99 if (machine->root_dir == NULL)
100 goto out;
101
102 if (machine__set_mmap_name(machine))
103 goto out;
104
105 if (pid != HOST_KERNEL_ID) {
106 struct thread *thread = machine__findnew_thread(machine, -1,
107 pid);
108
109 if (thread == NULL)
110 goto out;
111
112 thread__set_guest_comm(thread, pid);
113 thread__put(thread);
114 }
115
116 machine->current_tid = NULL;
117 err = 0;
118
119out:
120 if (err) {
121 zfree(&machine->kmaps);
122 zfree(&machine->root_dir);
123 zfree(&machine->mmap_name);
124 }
125	return err;
126}
127
128struct machine *machine__new_host(void)
129{
130 struct machine *machine = malloc(sizeof(*machine));
131
132 if (machine != NULL) {
133 machine__init(machine, "", HOST_KERNEL_ID);
134
135 if (machine__create_kernel_maps(machine) < 0)
136 goto out_delete;
137
138 machine->env = &perf_env;
139 }
140
141 return machine;
142out_delete:
143 free(machine);
144 return NULL;
145}
146
147struct machine *machine__new_kallsyms(void)
148{
149 struct machine *machine = machine__new_host();
150 /*
151 * FIXME:
152 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
153 * ask for not using the kcore parsing code, once this one is fixed
154 * to create a map per module.
155 */
156 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
157 machine__delete(machine);
158 machine = NULL;
159 }
160
161 return machine;
162}
163
164void machine__delete_threads(struct machine *machine)
165{
166 threads__remove_all_threads(&machine->threads);
167}
168
169void machine__exit(struct machine *machine)
170{
171 if (machine == NULL)
172 return;
173
174 machine__destroy_kernel_maps(machine);
175 maps__zput(machine->kmaps);
176 dsos__exit(&machine->dsos);
177 machine__exit_vdso(machine);
178 zfree(&machine->root_dir);
179 zfree(&machine->mmap_name);
180 zfree(&machine->current_tid);
181 zfree(&machine->kallsyms_filename);
182
183 threads__exit(&machine->threads);
184}
185
186void machine__delete(struct machine *machine)
187{
188 if (machine) {
189 machine__exit(machine);
190 free(machine);
191 }
192}
193
194void machines__init(struct machines *machines)
195{
196 machine__init(&machines->host, "", HOST_KERNEL_ID);
197 machines->guests = RB_ROOT_CACHED;
198}
199
200void machines__exit(struct machines *machines)
201{
202 machine__exit(&machines->host);
203 /* XXX exit guest */
204}
205
206struct machine *machines__add(struct machines *machines, pid_t pid,
207 const char *root_dir)
208{
209 struct rb_node **p = &machines->guests.rb_root.rb_node;
210 struct rb_node *parent = NULL;
211 struct machine *pos, *machine = malloc(sizeof(*machine));
212 bool leftmost = true;
213
214 if (machine == NULL)
215 return NULL;
216
217 if (machine__init(machine, root_dir, pid) != 0) {
218 free(machine);
219 return NULL;
220 }
221
222 while (*p != NULL) {
223 parent = *p;
224 pos = rb_entry(parent, struct machine, rb_node);
225 if (pid < pos->pid)
226 p = &(*p)->rb_left;
227 else {
228 p = &(*p)->rb_right;
229 leftmost = false;
230 }
231 }
232
233 rb_link_node(&machine->rb_node, parent, p);
234 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
235
236 machine->machines = machines;
237
238 return machine;
239}
240
241void machines__set_comm_exec(struct machines *machines, bool comm_exec)
242{
243 struct rb_node *nd;
244
245 machines->host.comm_exec = comm_exec;
246
247 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
248 struct machine *machine = rb_entry(nd, struct machine, rb_node);
249
250 machine->comm_exec = comm_exec;
251 }
252}
253
254struct machine *machines__find(struct machines *machines, pid_t pid)
255{
256 struct rb_node **p = &machines->guests.rb_root.rb_node;
257 struct rb_node *parent = NULL;
258 struct machine *machine;
259 struct machine *default_machine = NULL;
260
261 if (pid == HOST_KERNEL_ID)
262 return &machines->host;
263
264 while (*p != NULL) {
265 parent = *p;
266 machine = rb_entry(parent, struct machine, rb_node);
267 if (pid < machine->pid)
268 p = &(*p)->rb_left;
269 else if (pid > machine->pid)
270 p = &(*p)->rb_right;
271 else
272 return machine;
273 if (!machine->pid)
274 default_machine = machine;
275 }
276
277 return default_machine;
278}
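
/*
 * Note (illustrative): a guest machine registered with pid == 0 serves as
 * the default machine. machines__find(machines, 1234) returns the machine
 * with pid 1234 if it is registered; otherwise it falls back to that pid-0
 * default, provided its node lies on the search path walked above.
 */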
279
280struct machine *machines__findnew(struct machines *machines, pid_t pid)
281{
282 char path[PATH_MAX];
283 const char *root_dir = "";
284 struct machine *machine = machines__find(machines, pid);
285
286 if (machine && (machine->pid == pid))
287 goto out;
288
289 if ((pid != HOST_KERNEL_ID) &&
290 (pid != DEFAULT_GUEST_KERNEL_ID) &&
291 (symbol_conf.guestmount)) {
292 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
293 if (access(path, R_OK)) {
294 static struct strlist *seen;
295
296 if (!seen)
297 seen = strlist__new(NULL, NULL);
298
299 if (!strlist__has_entry(seen, path)) {
300 pr_err("Can't access file %s\n", path);
301 strlist__add(seen, path);
302 }
303 machine = NULL;
304 goto out;
305 }
306 root_dir = path;
307 }
308
309 machine = machines__add(machines, pid, root_dir);
310out:
311 return machine;
312}
313
314struct machine *machines__find_guest(struct machines *machines, pid_t pid)
315{
316 struct machine *machine = machines__find(machines, pid);
317
318 if (!machine)
319 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
320 return machine;
321}
322
323/*
324 * A common case for KVM test programs is that the test program acts as the
325 * hypervisor, creating, running and destroying the virtual machine, and
326 * providing the guest object code from its own object code. In this case,
327 * the VM is not running an OS, but only the functions loaded into it by the
328 * hypervisor test program, and conveniently, loaded at the same virtual
329 * addresses.
330 *
331 * Normally to resolve addresses, MMAP events are needed to map addresses
332 * back to the object code and debug symbols for that object code.
333 *
334 * Currently, there is no way to get such mapping information from guests
335 * but, in the scenario described above, the guest has the same mappings
336 * as the hypervisor, so support for that scenario can be achieved.
337 *
338 * To support that, copy the host thread's maps to the guest thread's maps.
339 * Note, we do not discover the guest until we encounter a guest event,
340 * which works well because it is not until then that we know that the host
341 * thread's maps have been set up.
342 *
343 * This function returns the guest thread. Apart from keeping the data
344 * structures sane, using a thread belonging to the guest machine, instead
345 * of the host thread, allows it to have its own comm (refer
346 * thread__set_guest_comm()).
347 */
348static struct thread *findnew_guest_code(struct machine *machine,
349 struct machine *host_machine,
350 pid_t pid)
351{
352 struct thread *host_thread;
353 struct thread *thread;
354 int err;
355
356 if (!machine)
357 return NULL;
358
359 thread = machine__findnew_thread(machine, -1, pid);
360 if (!thread)
361 return NULL;
362
363 /* Assume maps are set up if there are any */
364 if (!maps__empty(thread__maps(thread)))
365 return thread;
366
367 host_thread = machine__find_thread(host_machine, -1, pid);
368 if (!host_thread)
369 goto out_err;
370
371 thread__set_guest_comm(thread, pid);
372
373 /*
374 * Guest code can be found in hypervisor process at the same address
375 * so copy host maps.
376 */
377 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
378 thread__put(host_thread);
379 if (err)
380 goto out_err;
381
382 return thread;
383
384out_err:
385 thread__zput(thread);
386 return NULL;
387}
388
389struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
390{
391 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
392 struct machine *machine = machines__findnew(machines, pid);
393
394 return findnew_guest_code(machine, host_machine, pid);
395}
396
397struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
398{
399 struct machines *machines = machine->machines;
400 struct machine *host_machine;
401
402 if (!machines)
403 return NULL;
404
405 host_machine = machines__find(machines, HOST_KERNEL_ID);
406
407 return findnew_guest_code(machine, host_machine, pid);
408}
409
410void machines__process_guests(struct machines *machines,
411 machine__process_t process, void *data)
412{
413 struct rb_node *nd;
414
415 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
416 struct machine *pos = rb_entry(nd, struct machine, rb_node);
417 process(pos, data);
418 }
419}
420
421void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
422{
423 struct rb_node *node;
424 struct machine *machine;
425
426 machines->host.id_hdr_size = id_hdr_size;
427
428 for (node = rb_first_cached(&machines->guests); node;
429 node = rb_next(node)) {
430 machine = rb_entry(node, struct machine, rb_node);
431 machine->id_hdr_size = id_hdr_size;
432 }
435}
436
437static void machine__update_thread_pid(struct machine *machine,
438 struct thread *th, pid_t pid)
439{
440 struct thread *leader;
441
442 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
443 return;
444
445 thread__set_pid(th, pid);
446
447 if (thread__pid(th) == thread__tid(th))
448 return;
449
450 leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
451 if (!leader)
452 goto out_err;
453
454 if (!thread__maps(leader))
455 thread__set_maps(leader, maps__new(machine));
456
457 if (!thread__maps(leader))
458 goto out_err;
459
460 if (thread__maps(th) == thread__maps(leader))
461 goto out_put;
462
463 if (thread__maps(th)) {
464 /*
465 * Maps are created from MMAP events which provide the pid and
466 * tid. Consequently there never should be any maps on a thread
467 * with an unknown pid. Just print an error if there are.
468 */
469 if (!maps__empty(thread__maps(th)))
470 pr_err("Discarding thread maps for %d:%d\n",
471 thread__pid(th), thread__tid(th));
472 maps__put(thread__maps(th));
473 }
474
475 thread__set_maps(th, maps__get(thread__maps(leader)));
476out_put:
477 thread__put(leader);
478 return;
479out_err:
480 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
481 goto out_put;
482}
483
484/*
485 * The caller must eventually drop the thread->refcnt reference returned by
486 * a successful lookup or a newly inserted thread.
487 */
488static struct thread *__machine__findnew_thread(struct machine *machine,
489 pid_t pid,
490 pid_t tid,
491 bool create)
492{
493 struct thread *th = threads__find(&machine->threads, tid);
494 bool created;
495
496 if (th) {
497 machine__update_thread_pid(machine, th, pid);
498 return th;
499 }
500 if (!create)
501 return NULL;
502
503 th = threads__findnew(&machine->threads, pid, tid, &created);
504 if (created) {
505 /*
506 * We have to initialize maps separately after rb tree is
507 * updated.
508 *
509 * The reason is that we call machine__findnew_thread within
510		 * thread__init_maps to find the thread leader, and that would
511		 * corrupt the rb tree.
512 */
513 if (thread__init_maps(th, machine)) {
514 pr_err("Thread init failed thread %d\n", pid);
515 threads__remove(&machine->threads, th);
516 thread__put(th);
517 return NULL;
518 }
519 } else
520 machine__update_thread_pid(machine, th, pid);
521
522 return th;
523}
524
525struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
526{
527 return __machine__findnew_thread(machine, pid, tid, /*create=*/true);
528}
529
530struct thread *machine__find_thread(struct machine *machine, pid_t pid,
531 pid_t tid)
532{
533 return __machine__findnew_thread(machine, pid, tid, /*create=*/false);
534}
535
536/*
537 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
538 * So here a single thread is created for that, but actually there is a separate
539 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
540 * is only 1. That causes problems for some tools, requiring workarounds. For
541 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
542 */
543struct thread *machine__idle_thread(struct machine *machine)
544{
545 struct thread *thread = machine__findnew_thread(machine, 0, 0);
546
547 if (!thread || thread__set_comm(thread, "swapper", 0) ||
548 thread__set_namespaces(thread, 0, NULL))
549 pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
550
551 return thread;
552}
553
554struct comm *machine__thread_exec_comm(struct machine *machine,
555 struct thread *thread)
556{
557 if (machine->comm_exec)
558 return thread__exec_comm(thread);
559 else
560 return thread__comm(thread);
561}
562
563int machine__process_comm_event(struct machine *machine, union perf_event *event,
564 struct perf_sample *sample)
565{
566 struct thread *thread = machine__findnew_thread(machine,
567 event->comm.pid,
568 event->comm.tid);
569 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
570 int err = 0;
571
572 if (exec)
573 machine->comm_exec = true;
574
575 if (dump_trace)
576 perf_event__fprintf_comm(event, stdout);
577
578 if (thread == NULL ||
579 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
580 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
581 err = -1;
582 }
583
584 thread__put(thread);
585
586 return err;
587}
588
589int machine__process_namespaces_event(struct machine *machine __maybe_unused,
590 union perf_event *event,
591 struct perf_sample *sample __maybe_unused)
592{
593 struct thread *thread = machine__findnew_thread(machine,
594 event->namespaces.pid,
595 event->namespaces.tid);
596 int err = 0;
597
598 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
599 "\nWARNING: kernel seems to support more namespaces than perf"
600		  " tool.\nTry updating the perf tool.\n\n");
601
602 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
603 "\nWARNING: perf tool seems to support more namespaces than"
604		  " the kernel.\nTry updating the kernel.\n\n");
605
606 if (dump_trace)
607 perf_event__fprintf_namespaces(event, stdout);
608
609 if (thread == NULL ||
610 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
611 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
612 err = -1;
613 }
614
615 thread__put(thread);
616
617 return err;
618}
619
620int machine__process_cgroup_event(struct machine *machine,
621 union perf_event *event,
622 struct perf_sample *sample __maybe_unused)
623{
624 struct cgroup *cgrp;
625
626 if (dump_trace)
627 perf_event__fprintf_cgroup(event, stdout);
628
629 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
630 if (cgrp == NULL)
631 return -ENOMEM;
632
633 return 0;
634}
635
636int machine__process_lost_event(struct machine *machine __maybe_unused,
637 union perf_event *event, struct perf_sample *sample __maybe_unused)
638{
639 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
640 event->lost.id, event->lost.lost);
641 return 0;
642}
643
644int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
645 union perf_event *event, struct perf_sample *sample)
646{
647 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
648 sample->id, event->lost_samples.lost,
649 event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
650 return 0;
651}
652
653int machine__process_aux_event(struct machine *machine __maybe_unused,
654 union perf_event *event)
655{
656 if (dump_trace)
657 perf_event__fprintf_aux(event, stdout);
658 return 0;
659}
660
661int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
662 union perf_event *event)
663{
664 if (dump_trace)
665 perf_event__fprintf_itrace_start(event, stdout);
666 return 0;
667}
668
669int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
670 union perf_event *event)
671{
672 if (dump_trace)
673 perf_event__fprintf_aux_output_hw_id(event, stdout);
674 return 0;
675}
676
677int machine__process_switch_event(struct machine *machine __maybe_unused,
678 union perf_event *event)
679{
680 if (dump_trace)
681 perf_event__fprintf_switch(event, stdout);
682 return 0;
683}
684
685static int machine__process_ksymbol_register(struct machine *machine,
686 union perf_event *event,
687 struct perf_sample *sample __maybe_unused)
688{
689 struct symbol *sym;
690 struct dso *dso = NULL;
691 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
692 int err = 0;
693
694 if (!map) {
695 dso = dso__new(event->ksymbol.name);
696
697 if (!dso) {
698 err = -ENOMEM;
699 goto out;
700 }
701 dso__set_kernel(dso, DSO_SPACE__KERNEL);
702 map = map__new2(0, dso);
703 if (!map) {
704 err = -ENOMEM;
705 goto out;
706 }
707 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
708 dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
709 dso__data(dso)->file_size = event->ksymbol.len;
710 dso__set_loaded(dso);
711 }
712
713 map__set_start(map, event->ksymbol.addr);
714 map__set_end(map, map__start(map) + event->ksymbol.len);
715 err = maps__insert(machine__kernel_maps(machine), map);
716 if (err) {
717 err = -ENOMEM;
718 goto out;
719 }
720
721 dso__set_loaded(dso);
722
723 if (is_bpf_image(event->ksymbol.name)) {
724 dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
725 dso__set_long_name(dso, "", false);
726 }
727 } else {
728 dso = dso__get(map__dso(map));
729 }
730
731 sym = symbol__new(map__map_ip(map, map__start(map)),
732 event->ksymbol.len,
733 0, 0, event->ksymbol.name);
734 if (!sym) {
735 err = -ENOMEM;
736 goto out;
737 }
738 dso__insert_symbol(dso, sym);
739out:
740 map__put(map);
741 dso__put(dso);
742 return err;
743}
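
/*
 * Example (illustrative; values are made up): a PERF_RECORD_KSYMBOL
 * register event for a BPF program, say name "bpf_prog_1234_foo" at
 * addr 0xffffffffc0123000 with len 0x200, creates a kernel dso and map
 * covering [addr, addr + len) and inserts a single symbol spanning the
 * whole range, so later samples in that range resolve by name.
 */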
744
745static int machine__process_ksymbol_unregister(struct machine *machine,
746 union perf_event *event,
747 struct perf_sample *sample __maybe_unused)
748{
749 struct symbol *sym;
750 struct map *map;
751
752 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
753 if (!map)
754 return 0;
755
756 if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
757 maps__remove(machine__kernel_maps(machine), map);
758 else {
759 struct dso *dso = map__dso(map);
760
761 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
762 if (sym)
763 dso__delete_symbol(dso, sym);
764 }
765 map__put(map);
766 return 0;
767}
768
769int machine__process_ksymbol(struct machine *machine __maybe_unused,
770 union perf_event *event,
771 struct perf_sample *sample)
772{
773 if (dump_trace)
774 perf_event__fprintf_ksymbol(event, stdout);
775
776 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
777 return machine__process_ksymbol_unregister(machine, event,
778 sample);
779 return machine__process_ksymbol_register(machine, event, sample);
780}
781
782int machine__process_text_poke(struct machine *machine, union perf_event *event,
783 struct perf_sample *sample __maybe_unused)
784{
785 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
786 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
787 struct dso *dso = map ? map__dso(map) : NULL;
788
789 if (dump_trace)
790 perf_event__fprintf_text_poke(event, machine, stdout);
791
792 if (!event->text_poke.new_len)
793 goto out;
794
795 if (cpumode != PERF_RECORD_MISC_KERNEL) {
796 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
797 goto out;
798 }
799
800 if (dso) {
801 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
802 int ret;
803
804 /*
805 * Kernel maps might be changed when loading symbols so loading
806 * must be done prior to using kernel maps.
807 */
808 map__load(map);
809 ret = dso__data_write_cache_addr(dso, map, machine,
810 event->text_poke.addr,
811 new_bytes,
812 event->text_poke.new_len);
813 if (ret != event->text_poke.new_len)
814 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
815 event->text_poke.addr);
816 } else {
817 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
818 event->text_poke.addr);
819 }
820out:
821 map__put(map);
822 return 0;
823}
824
825static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
826 const char *filename)
827{
828 struct map *map = NULL;
829 struct kmod_path m;
830 struct dso *dso;
831 int err;
832
833 if (kmod_path__parse_name(&m, filename))
834 return NULL;
835
836 dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
837 if (dso == NULL)
838 goto out;
839
840 map = map__new2(start, dso);
841 if (map == NULL)
842 goto out;
843
844 err = maps__insert(machine__kernel_maps(machine), map);
845 /* If maps__insert failed, return NULL. */
846 if (err) {
847 map__put(map);
848 map = NULL;
849 }
850out:
851 /* put the dso here, corresponding to machine__findnew_module_dso */
852 dso__put(dso);
853 zfree(&m.name);
854 return map;
855}
856
857size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
858{
859 struct rb_node *nd;
860 size_t ret = dsos__fprintf(&machines->host.dsos, fp);
861
862 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
863 struct machine *pos = rb_entry(nd, struct machine, rb_node);
864 ret += dsos__fprintf(&pos->dsos, fp);
865 }
866
867 return ret;
868}
869
870size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
871 bool (skip)(struct dso *dso, int parm), int parm)
872{
873 return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
874}
875
876size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
877 bool (skip)(struct dso *dso, int parm), int parm)
878{
879 struct rb_node *nd;
880 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
881
882 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
883 struct machine *pos = rb_entry(nd, struct machine, rb_node);
884 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
885 }
886 return ret;
887}
888
889size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
890{
891 int i;
892 size_t printed = 0;
893 struct dso *kdso = machine__kernel_dso(machine);
894
895 if (dso__has_build_id(kdso)) {
896 char filename[PATH_MAX];
897
898 if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
899 printed += fprintf(fp, "[0] %s\n", filename);
900 }
901
902 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
903 printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
904 vmlinux_path[i]);
905 }
906 return printed;
907}
908
909struct machine_fprintf_cb_args {
910 FILE *fp;
911 size_t printed;
912};
913
914static int machine_fprintf_cb(struct thread *thread, void *data)
915{
916 struct machine_fprintf_cb_args *args = data;
917
918 /* TODO: handle fprintf errors. */
919 args->printed += thread__fprintf(thread, args->fp);
920 return 0;
921}
922
923size_t machine__fprintf(struct machine *machine, FILE *fp)
924{
925 struct machine_fprintf_cb_args args = {
926 .fp = fp,
927 .printed = 0,
928 };
929 size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads));
930
931 machine__for_each_thread(machine, machine_fprintf_cb, &args);
932 return ret + args.printed;
933}
934
935static struct dso *machine__get_kernel(struct machine *machine)
936{
937 const char *vmlinux_name = machine->mmap_name;
938 struct dso *kernel;
939
940 if (machine__is_host(machine)) {
941 if (symbol_conf.vmlinux_name)
942 vmlinux_name = symbol_conf.vmlinux_name;
943
944 kernel = machine__findnew_kernel(machine, vmlinux_name,
945 "[kernel]", DSO_SPACE__KERNEL);
946 } else {
947 if (symbol_conf.default_guest_vmlinux_name)
948 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
949
950 kernel = machine__findnew_kernel(machine, vmlinux_name,
951 "[guest.kernel]",
952 DSO_SPACE__KERNEL_GUEST);
953 }
954
955 if (kernel != NULL && (!dso__has_build_id(kernel)))
956 dso__read_running_kernel_build_id(kernel, machine);
957
958 return kernel;
959}
960
961void machine__get_kallsyms_filename(struct machine *machine, char *buf,
962 size_t bufsz)
963{
964 if (machine__is_default_guest(machine))
965 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
966 else
967 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
968}
969
970const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
971
972/* Figure out the start address of the kernel map from /proc/kallsyms.
973 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
974 * symbol_name if it's not that important.
975 */
976static int machine__get_running_kernel_start(struct machine *machine,
977 const char **symbol_name,
978 u64 *start, u64 *end)
979{
980 char filename[PATH_MAX];
981 int i, err = -1;
982 const char *name;
983 u64 addr = 0;
984
985 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
986
987 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
988 return 0;
989
990 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
991 err = kallsyms__get_function_start(filename, name, &addr);
992 if (!err)
993 break;
994 }
995
996 if (err)
997 return -1;
998
999 if (symbol_name)
1000 *symbol_name = name;
1001
1002 *start = addr;
1003
1004 err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1005 if (err)
1006 err = kallsyms__get_symbol_start(filename, "_etext", &addr);
1007 if (!err)
1008 *end = addr;
1009
1010 return 0;
1011}
1012
1013int machine__create_extra_kernel_map(struct machine *machine,
1014 struct dso *kernel,
1015 struct extra_kernel_map *xm)
1016{
1017 struct kmap *kmap;
1018 struct map *map;
1019 int err;
1020
1021 map = map__new2(xm->start, kernel);
1022 if (!map)
1023 return -ENOMEM;
1024
1025 map__set_end(map, xm->end);
1026 map__set_pgoff(map, xm->pgoff);
1027
1028 kmap = map__kmap(map);
1029
1030 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1031
1032 err = maps__insert(machine__kernel_maps(machine), map);
1033
1034 if (!err) {
1035 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1036 kmap->name, map__start(map), map__end(map));
1037 }
1038
1039 map__put(map);
1040
1041 return err;
1042}
1043
1044static u64 find_entry_trampoline(struct dso *dso)
1045{
1046 /* Duplicates are removed so lookup all aliases */
1047 const char *syms[] = {
1048 "_entry_trampoline",
1049 "__entry_trampoline_start",
1050 "entry_SYSCALL_64_trampoline",
1051 };
1052 struct symbol *sym = dso__first_symbol(dso);
1053 unsigned int i;
1054
1055 for (; sym; sym = dso__next_symbol(sym)) {
1056 if (sym->binding != STB_GLOBAL)
1057 continue;
1058 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1059 if (!strcmp(sym->name, syms[i]))
1060 return sym->start;
1061 }
1062 }
1063
1064 return 0;
1065}
1066
1067/*
1068 * These values can be used for kernels that do not have symbols for the entry
1069 * trampolines in kallsyms.
1070 */
1071#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1072#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1073#define X86_64_ENTRY_TRAMPOLINE 0x6000
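
/*
 * Worked example (derived from the constants above): the per-CPU trampoline
 * map created below starts, for CPU 0, at
 *   0xfffffe0000000000 + 0 * 0x2c000 + 0x6000 = 0xfffffe0000006000
 * and, for CPU 1, at
 *   0xfffffe0000000000 + 1 * 0x2c000 + 0x6000 = 0xfffffe0000032000.
 */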
1074
1075struct machine__map_x86_64_entry_trampolines_args {
1076 struct maps *kmaps;
1077 bool found;
1078};
1079
1080static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1081{
1082 struct machine__map_x86_64_entry_trampolines_args *args = data;
1083 struct map *dest_map;
1084 struct kmap *kmap = __map__kmap(map);
1085
1086 if (!kmap || !is_entry_trampoline(kmap->name))
1087 return 0;
1088
1089 dest_map = maps__find(args->kmaps, map__pgoff(map));
1090 if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
1091 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1092
1093 map__put(dest_map);
1094 args->found = true;
1095 return 0;
1096}
1097
1098/* Map x86_64 PTI entry trampolines */
1099int machine__map_x86_64_entry_trampolines(struct machine *machine,
1100 struct dso *kernel)
1101{
1102 struct machine__map_x86_64_entry_trampolines_args args = {
1103 .kmaps = machine__kernel_maps(machine),
1104 .found = false,
1105 };
1106 int nr_cpus_avail, cpu;
1107 u64 pgoff;
1108
1109 /*
1110 * In the vmlinux case, pgoff is a virtual address which must now be
1111 * mapped to a vmlinux offset.
1112 */
1113 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1114
1115 if (args.found || machine->trampolines_mapped)
1116 return 0;
1117
1118 pgoff = find_entry_trampoline(kernel);
1119 if (!pgoff)
1120 return 0;
1121
1122 nr_cpus_avail = machine__nr_cpus_avail(machine);
1123
1124 /* Add a 1 page map for each CPU's entry trampoline */
1125 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1126 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1127 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1128 X86_64_ENTRY_TRAMPOLINE;
1129 struct extra_kernel_map xm = {
1130 .start = va,
1131 .end = va + page_size,
1132 .pgoff = pgoff,
1133 };
1134
1135 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1136
1137 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1138 return -1;
1139 }
1140
1141 machine->trampolines_mapped = nr_cpus_avail;
1142
1143 return 0;
1144}
1145
1146int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1147 struct dso *kernel __maybe_unused)
1148{
1149 return 0;
1150}
1151
1152static int
1153__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1154{
1155	/* In case of renewing the kernel map, destroy the previous one */
1156 machine__destroy_kernel_maps(machine);
1157
1158 map__put(machine->vmlinux_map);
1159 machine->vmlinux_map = map__new2(0, kernel);
1160 if (machine->vmlinux_map == NULL)
1161 return -ENOMEM;
1162
1163 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1164 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1165}
1166
1167void machine__destroy_kernel_maps(struct machine *machine)
1168{
1169 struct kmap *kmap;
1170 struct map *map = machine__kernel_map(machine);
1171
1172 if (map == NULL)
1173 return;
1174
1175 kmap = map__kmap(map);
1176 maps__remove(machine__kernel_maps(machine), map);
1177 if (kmap && kmap->ref_reloc_sym) {
1178 zfree((char **)&kmap->ref_reloc_sym->name);
1179 zfree(&kmap->ref_reloc_sym);
1180 }
1181
1182 map__zput(machine->vmlinux_map);
1183}
1184
1185int machines__create_guest_kernel_maps(struct machines *machines)
1186{
1187 int ret = 0;
1188 struct dirent **namelist = NULL;
1189 int i, items = 0;
1190 char path[PATH_MAX];
1191 pid_t pid;
1192 char *endp;
1193
1194 if (symbol_conf.default_guest_vmlinux_name ||
1195 symbol_conf.default_guest_modules ||
1196 symbol_conf.default_guest_kallsyms) {
1197 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1198 }
1199
1200 if (symbol_conf.guestmount) {
1201 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1202 if (items <= 0)
1203 return -ENOENT;
1204 for (i = 0; i < items; i++) {
1205 if (!isdigit(namelist[i]->d_name[0])) {
1206 /* Filter out . and .. */
1207 continue;
1208 }
1209 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1210 if ((*endp != '\0') ||
1211 (endp == namelist[i]->d_name) ||
1212 (errno == ERANGE)) {
1213 pr_debug("invalid directory (%s). Skipping.\n",
1214 namelist[i]->d_name);
1215 continue;
1216 }
1217 sprintf(path, "%s/%s/proc/kallsyms",
1218 symbol_conf.guestmount,
1219 namelist[i]->d_name);
1220 ret = access(path, R_OK);
1221 if (ret) {
1222 pr_debug("Can't access file %s\n", path);
1223 goto failure;
1224 }
1225 machines__create_kernel_maps(machines, pid);
1226 }
1227failure:
1228 free(namelist);
1229 }
1230
1231 return ret;
1232}
1233
1234void machines__destroy_kernel_maps(struct machines *machines)
1235{
1236 struct rb_node *next = rb_first_cached(&machines->guests);
1237
1238 machine__destroy_kernel_maps(&machines->host);
1239
1240 while (next) {
1241 struct machine *pos = rb_entry(next, struct machine, rb_node);
1242
1243 next = rb_next(&pos->rb_node);
1244 rb_erase_cached(&pos->rb_node, &machines->guests);
1245 machine__delete(pos);
1246 }
1247}
1248
1249int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1250{
1251 struct machine *machine = machines__findnew(machines, pid);
1252
1253 if (machine == NULL)
1254 return -1;
1255
1256 return machine__create_kernel_maps(machine);
1257}
1258
1259int machine__load_kallsyms(struct machine *machine, const char *filename)
1260{
1261 struct map *map = machine__kernel_map(machine);
1262 struct dso *dso = map__dso(map);
1263 int ret = __dso__load_kallsyms(dso, filename, map, true);
1264
1265 if (ret > 0) {
1266 dso__set_loaded(dso);
1267 /*
1268		 * Since /proc/kallsyms will have multiple sections for the
1269 * kernel, with modules between them, fixup the end of all
1270 * sections.
1271 */
1272 maps__fixup_end(machine__kernel_maps(machine));
1273 }
1274
1275 return ret;
1276}
1277
1278int machine__load_vmlinux_path(struct machine *machine)
1279{
1280 struct map *map = machine__kernel_map(machine);
1281 struct dso *dso = map__dso(map);
1282 int ret = dso__load_vmlinux_path(dso, map);
1283
1284 if (ret > 0)
1285 dso__set_loaded(dso);
1286
1287 return ret;
1288}
1289
1290static char *get_kernel_version(const char *root_dir)
1291{
1292 char version[PATH_MAX];
1293 FILE *file;
1294 char *name, *tmp;
1295 const char *prefix = "Linux version ";
1296
1297 sprintf(version, "%s/proc/version", root_dir);
1298 file = fopen(version, "r");
1299 if (!file)
1300 return NULL;
1301
1302 tmp = fgets(version, sizeof(version), file);
1303 fclose(file);
1304 if (!tmp)
1305 return NULL;
1306
1307 name = strstr(version, prefix);
1308 if (!name)
1309 return NULL;
1310 name += strlen(prefix);
1311 tmp = strchr(name, ' ');
1312 if (tmp)
1313 *tmp = '\0';
1314
1315 return strdup(name);
1316}
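
/*
 * Example (illustrative): for a /proc/version line such as
 *   "Linux version 6.1.0-13-amd64 (debian-kernel@lists.debian.org) ..."
 * get_kernel_version() returns the strdup()ed string "6.1.0-13-amd64".
 */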
1317
1318static bool is_kmod_dso(struct dso *dso)
1319{
1320 return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1321 dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
1322}
1323
1324static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1325{
1326 char *long_name;
1327 struct dso *dso;
1328 struct map *map = maps__find_by_name(maps, m->name);
1329
1330 if (map == NULL)
1331 return 0;
1332
1333 long_name = strdup(path);
1334 if (long_name == NULL) {
1335 map__put(map);
1336 return -ENOMEM;
1337 }
1338
1339 dso = map__dso(map);
1340 dso__set_long_name(dso, long_name, true);
1341 dso__kernel_module_get_build_id(dso, "");
1342
1343 /*
1344 * Full name could reveal us kmod compression, so
1345 * we need to update the symtab_type if needed.
1346 */
1347 if (m->comp && is_kmod_dso(dso)) {
1348 dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
1349 dso__set_comp(dso, m->comp);
1350 }
1351 map__put(map);
1352 return 0;
1353}
1354
1355static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1356{
1357 struct dirent *dent;
1358 DIR *dir = opendir(dir_name);
1359 int ret = 0;
1360
1361 if (!dir) {
1362 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1363 return -1;
1364 }
1365
1366 while ((dent = readdir(dir)) != NULL) {
1367 char path[PATH_MAX];
1368 struct stat st;
1369
1370		/* sshfs might return bad dent->d_type, so we have to stat */
1371 path__join(path, sizeof(path), dir_name, dent->d_name);
1372 if (stat(path, &st))
1373 continue;
1374
1375 if (S_ISDIR(st.st_mode)) {
1376 if (!strcmp(dent->d_name, ".") ||
1377 !strcmp(dent->d_name, ".."))
1378 continue;
1379
1380 /* Do not follow top-level source and build symlinks */
1381 if (depth == 0) {
1382 if (!strcmp(dent->d_name, "source") ||
1383 !strcmp(dent->d_name, "build"))
1384 continue;
1385 }
1386
1387 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1388 if (ret < 0)
1389 goto out;
1390 } else {
1391 struct kmod_path m;
1392
1393 ret = kmod_path__parse_name(&m, dent->d_name);
1394 if (ret)
1395 goto out;
1396
1397 if (m.kmod)
1398 ret = maps__set_module_path(maps, path, &m);
1399
1400 zfree(&m.name);
1401
1402 if (ret)
1403 goto out;
1404 }
1405 }
1406
1407out:
1408 closedir(dir);
1409 return ret;
1410}
1411
1412static int machine__set_modules_path(struct machine *machine)
1413{
1414 char *version;
1415 char modules_path[PATH_MAX];
1416
1417 version = get_kernel_version(machine->root_dir);
1418 if (!version)
1419 return -1;
1420
1421 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1422 machine->root_dir, version);
1423 free(version);
1424
1425 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1426}
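
/*
 * Example (illustrative): for kernel version "6.1.0" and an empty root_dir,
 * the walk above starts at /lib/modules/6.1.0 and, on finding e.g.
 * kernel/fs/xfs/xfs.ko.xz, attaches that path to the map named "xfs" as its
 * long name and bumps the dso's symtab type to the compressed variant.
 */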

1427int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1428 u64 *size __maybe_unused,
1429 const char *name __maybe_unused)
1430{
1431 return 0;
1432}
1433
1434static int machine__create_module(void *arg, const char *name, u64 start,
1435 u64 size)
1436{
1437 struct machine *machine = arg;
1438 struct map *map;
1439
1440 if (arch__fix_module_text_start(&start, &size, name) < 0)
1441 return -1;
1442
1443 map = machine__addnew_module_map(machine, start, name);
1444 if (map == NULL)
1445 return -1;
1446 map__set_end(map, start + size);
1447
1448 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1449 map__put(map);
1450 return 0;
1451}
1452
1453static int machine__create_modules(struct machine *machine)
1454{
1455 const char *modules;
1456 char path[PATH_MAX];
1457
1458 if (machine__is_default_guest(machine)) {
1459 modules = symbol_conf.default_guest_modules;
1460 } else {
1461 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1462 modules = path;
1463 }
1464
1465 if (symbol__restricted_filename(modules, "/proc/modules"))
1466 return -1;
1467
1468 if (modules__parse(modules, machine, machine__create_module))
1469 return -1;
1470
1471 if (!machine__set_modules_path(machine))
1472 return 0;
1473
1474 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1475
1476 return 0;
1477}
1478
1479static void machine__set_kernel_mmap(struct machine *machine,
1480 u64 start, u64 end)
1481{
1482 map__set_start(machine->vmlinux_map, start);
1483 map__set_end(machine->vmlinux_map, end);
1484 /*
1485 * Be a bit paranoid here: some perf.data files come with
1486 * a zero-sized synthesized MMAP event for the kernel.
1487 */
1488 if (start == 0 && end == 0)
1489 map__set_end(machine->vmlinux_map, ~0ULL);
1490}
1491
1492static int machine__update_kernel_mmap(struct machine *machine,
1493 u64 start, u64 end)
1494{
1495 struct map *orig, *updated;
1496 int err;
1497
1498 orig = machine->vmlinux_map;
1499 updated = map__get(orig);
1500
1501 machine->vmlinux_map = updated;
1502 maps__remove(machine__kernel_maps(machine), orig);
1503 machine__set_kernel_mmap(machine, start, end);
1504 err = maps__insert(machine__kernel_maps(machine), updated);
1505 map__put(orig);
1506
1507 return err;
1508}
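/*
 * Reference-counting sketch of the swap above, assuming the usual
 * map__get()/map__put() semantics:
 *
 *   updated = map__get(orig);      // temporary +1 on the vmlinux map
 *   maps__remove(kmaps, orig);     // kmaps drops its reference
 *   machine__set_kernel_mmap();    // update start/end on the map
 *   maps__insert(kmaps, updated);  // kmaps takes a fresh reference
 *   map__put(orig);                // drop the temporary +1
 *
 * The temporary reference keeps the map alive across the remove/insert
 * cycle even if kmaps held the last reference to it.
 */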
1509
1510int machine__create_kernel_maps(struct machine *machine)
1511{
1512 struct dso *kernel = machine__get_kernel(machine);
1513 const char *name = NULL;
1514 u64 start = 0, end = ~0ULL;
1515 int ret;
1516
1517 if (kernel == NULL)
1518 return -1;
1519
1520 ret = __machine__create_kernel_maps(machine, kernel);
1521 if (ret < 0)
1522 goto out_put;
1523
1524 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1525 if (machine__is_host(machine))
1526 pr_debug("Problems creating module maps, "
1527 "continuing anyway...\n");
1528 else
1529 pr_debug("Problems creating module maps for guest %d, "
1530 "continuing anyway...\n", machine->pid);
1531 }
1532
1533 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1534 if (name &&
1535 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1536 machine__destroy_kernel_maps(machine);
1537 ret = -1;
1538 goto out_put;
1539 }
1540
1541 /*
1542 * We have a real start address now, so re-order the kmaps;
1543 * assume the kernel map is the last one in the kmaps.
1544 */
1545 ret = machine__update_kernel_mmap(machine, start, end);
1546 if (ret < 0)
1547 goto out_put;
1548 }
1549
1550 if (machine__create_extra_kernel_maps(machine, kernel))
1551 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1552
1553 if (end == ~0ULL) {
1554 /* update end address of the kernel map using adjacent module address */
1555 struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1556 machine__kernel_map(machine));
1557
1558 if (next) {
1559 machine__set_kernel_mmap(machine, start, map__start(next));
1560 map__put(next);
1561 }
1562 }
1563
1564out_put:
1565 dso__put(kernel);
1566 return ret;
1567}
1568
1569static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
1570{
1571 return dso__is_kcore(dso) ? 1 : 0;
1572}
1573
1574static bool machine__uses_kcore(struct machine *machine)
1575{
1576 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0;
1577}
1578
1579static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1580 struct extra_kernel_map *xm)
1581{
1582 return machine__is(machine, "x86_64") &&
1583 is_entry_trampoline(xm->name);
1584}
1585
1586static int machine__process_extra_kernel_map(struct machine *machine,
1587 struct extra_kernel_map *xm)
1588{
1589 struct dso *kernel = machine__kernel_dso(machine);
1590
1591 if (kernel == NULL)
1592 return -1;
1593
1594 return machine__create_extra_kernel_map(machine, kernel, xm);
1595}
1596
1597static int machine__process_kernel_mmap_event(struct machine *machine,
1598 struct extra_kernel_map *xm,
1599 struct build_id *bid)
1600{
1601 enum dso_space_type dso_space;
1602 bool is_kernel_mmap;
1603 const char *mmap_name = machine->mmap_name;
1604
1605 /* If we have maps from kcore then we do not need or want any others */
1606 if (machine__uses_kcore(machine))
1607 return 0;
1608
1609 if (machine__is_host(machine))
1610 dso_space = DSO_SPACE__KERNEL;
1611 else
1612 dso_space = DSO_SPACE__KERNEL_GUEST;
1613
1614 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1615 if (!is_kernel_mmap && !machine__is_host(machine)) {
1616 /*
1617 * If the event was recorded inside the guest and injected into
1618 * the host perf.data file, then it will match a host mmap_name,
1619 * so try that - see machine__set_mmap_name().
1620 */
1621 mmap_name = "[kernel.kallsyms]";
1622 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1623 }
1624 if (xm->name[0] == '/' ||
1625 (!is_kernel_mmap && xm->name[0] == '[')) {
1626 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1627
1628 if (map == NULL)
1629 goto out_problem;
1630
1631 map__set_end(map, map__start(map) + xm->end - xm->start);
1632
1633 if (build_id__is_defined(bid))
1634 dso__set_build_id(map__dso(map), bid);
1635
1636 map__put(map);
1637 } else if (is_kernel_mmap) {
1638 const char *symbol_name = xm->name + strlen(mmap_name);
1639 /*
1640 * Should be there already, from the build-id table in
1641 * the header.
1642 */
1643 struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
1644
1645 if (kernel == NULL)
1646 kernel = machine__findnew_dso(machine, machine->mmap_name);
1647 if (kernel == NULL)
1648 goto out_problem;
1649
1650 dso__set_kernel(kernel, dso_space);
1651 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1652 dso__put(kernel);
1653 goto out_problem;
1654 }
1655
1656 if (strstr(dso__long_name(kernel), "vmlinux"))
1657 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1658
1659 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1660 dso__put(kernel);
1661 goto out_problem;
1662 }
1663
1664 if (build_id__is_defined(bid))
1665 dso__set_build_id(kernel, bid);
1666
1667 /*
1668 * Avoid using a zero address (kptr_restrict) for the ref reloc
1669 * symbol. A zero here effectively means that
1670 * /proc/sys/kernel/kptr_restrict was non-zero at record time.
1671 */
1672 if (xm->pgoff != 0) {
1673 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1674 symbol_name,
1675 xm->pgoff);
1676 }
1677
1678 if (machine__is_default_guest(machine)) {
1679 /*
1680 * Preload the DSOs of the guest kernel and modules.
1681 */
1682 dso__load(kernel, machine__kernel_map(machine));
1683 }
1684 dso__put(kernel);
1685 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1686 return machine__process_extra_kernel_map(machine, xm);
1687 }
1688 return 0;
1689out_problem:
1690 return -1;
1691}
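/*
 * Hypothetical event names for the branches above: a host event named
 * "[kernel.kallsyms]_text" matches mmap_name, so symbol_name points at
 * "_text" and becomes the kallsyms ref reloc symbol when pgoff is
 * usable, while an event named "/lib/modules/6.8.0/kernel/fs/xfs/xfs.ko"
 * starts with '/' and takes the module-map branch instead.
 */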
1692
1693int machine__process_mmap2_event(struct machine *machine,
1694 union perf_event *event,
1695 struct perf_sample *sample)
1696{
1697 struct thread *thread;
1698 struct map *map;
1699 struct dso_id dso_id = {
1700 .maj = event->mmap2.maj,
1701 .min = event->mmap2.min,
1702 .ino = event->mmap2.ino,
1703 .ino_generation = event->mmap2.ino_generation,
1704 };
1705 struct build_id __bid, *bid = NULL;
1706 int ret = 0;
1707
1708 if (dump_trace)
1709 perf_event__fprintf_mmap2(event, stdout);
1710
1711 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1712 bid = &__bid;
1713 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1714 }
1715
1716 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1717 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1718 struct extra_kernel_map xm = {
1719 .start = event->mmap2.start,
1720 .end = event->mmap2.start + event->mmap2.len,
1721 .pgoff = event->mmap2.pgoff,
1722 };
1723
1724 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1725 ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1726 if (ret < 0)
1727 goto out_problem;
1728 return 0;
1729 }
1730
1731 thread = machine__findnew_thread(machine, event->mmap2.pid,
1732 event->mmap2.tid);
1733 if (thread == NULL)
1734 goto out_problem;
1735
1736 map = map__new(machine, event->mmap2.start,
1737 event->mmap2.len, event->mmap2.pgoff,
1738 &dso_id, event->mmap2.prot,
1739 event->mmap2.flags, bid,
1740 event->mmap2.filename, thread);
1741
1742 if (map == NULL)
1743 goto out_problem_map;
1744
1745 ret = thread__insert_map(thread, map);
1746 if (ret)
1747 goto out_problem_insert;
1748
1749 thread__put(thread);
1750 map__put(map);
1751 return 0;
1752
1753out_problem_insert:
1754 map__put(map);
1755out_problem_map:
1756 thread__put(thread);
1757out_problem:
1758 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1759 return 0;
1760}
1761
1762int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1763 struct perf_sample *sample)
1764{
1765 struct thread *thread;
1766 struct map *map;
1767 u32 prot = 0;
1768 int ret = 0;
1769
1770 if (dump_trace)
1771 perf_event__fprintf_mmap(event, stdout);
1772
1773 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1774 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1775 struct extra_kernel_map xm = {
1776 .start = event->mmap.start,
1777 .end = event->mmap.start + event->mmap.len,
1778 .pgoff = event->mmap.pgoff,
1779 };
1780
1781 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1782 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1783 if (ret < 0)
1784 goto out_problem;
1785 return 0;
1786 }
1787
1788 thread = machine__findnew_thread(machine, event->mmap.pid,
1789 event->mmap.tid);
1790 if (thread == NULL)
1791 goto out_problem;
1792
1793 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1794 prot = PROT_EXEC;
1795
1796 map = map__new(machine, event->mmap.start,
1797 event->mmap.len, event->mmap.pgoff,
1798 NULL, prot, 0, NULL, event->mmap.filename, thread);
1799
1800 if (map == NULL)
1801 goto out_problem_map;
1802
1803 ret = thread__insert_map(thread, map);
1804 if (ret)
1805 goto out_problem_insert;
1806
1807 thread__put(thread);
1808 map__put(map);
1809 return 0;
1810
1811out_problem_insert:
1812 map__put(map);
1813out_problem_map:
1814 thread__put(thread);
1815out_problem:
1816 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1817 return 0;
1818}
1819
1820void machine__remove_thread(struct machine *machine, struct thread *th)
1821{
1822 return threads__remove(&machine->threads, th);
1823}
1824
1825int machine__process_fork_event(struct machine *machine, union perf_event *event,
1826 struct perf_sample *sample)
1827{
1828 struct thread *thread = machine__find_thread(machine,
1829 event->fork.pid,
1830 event->fork.tid);
1831 struct thread *parent = machine__findnew_thread(machine,
1832 event->fork.ppid,
1833 event->fork.ptid);
1834 bool do_maps_clone = true;
1835 int err = 0;
1836
1837 if (dump_trace)
1838 perf_event__fprintf_task(event, stdout);
1839
1840 /*
1841 * There may be an existing thread that is not actually the parent,
1842 * either because we are processing events out of order, or because the
1843 * (fork) event that would have removed the thread was lost. Assume the
1844 * latter case and continue on as best we can.
1845 */
1846 if (thread__pid(parent) != (pid_t)event->fork.ppid) {
1847 dump_printf("removing erroneous parent thread %d/%d\n",
1848 thread__pid(parent), thread__tid(parent));
1849 machine__remove_thread(machine, parent);
1850 thread__put(parent);
1851 parent = machine__findnew_thread(machine, event->fork.ppid,
1852 event->fork.ptid);
1853 }
1854
1855 /* if a thread currently exists for the thread id remove it */
1856 if (thread != NULL) {
1857 machine__remove_thread(machine, thread);
1858 thread__put(thread);
1859 }
1860
1861 thread = machine__findnew_thread(machine, event->fork.pid,
1862 event->fork.tid);
1863 /*
1864 * When synthesizing FORK events, we are trying to create thread
1865 * objects for the already running tasks on the machine.
1866 *
1867 * Normally, for a kernel FORK event, we want to clone the parent's
1868 * maps because that is what the kernel just did.
1869 *
1870 * But when synthesizing, this should not be done. If we do, we end up
1871 * with overlapping maps as we process the synthesized MMAP2 events that
1872 * get delivered shortly thereafter.
1873 *
1874 * Use the FORK event misc flags in an internal way to signal this
1875 * situation, so we can elide the map clone when appropriate.
1876 */
1877 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1878 do_maps_clone = false;
1879
1880 if (thread == NULL || parent == NULL ||
1881 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1882 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1883 err = -1;
1884 }
1885 thread__put(thread);
1886 thread__put(parent);
1887
1888 return err;
1889}
1890
1891int machine__process_exit_event(struct machine *machine, union perf_event *event,
1892 struct perf_sample *sample __maybe_unused)
1893{
1894 struct thread *thread = machine__find_thread(machine,
1895 event->fork.pid,
1896 event->fork.tid);
1897
1898 if (dump_trace)
1899 perf_event__fprintf_task(event, stdout);
1900
1901 if (thread != NULL) {
1902 if (symbol_conf.keep_exited_threads)
1903 thread__set_exited(thread, /*exited=*/true);
1904 else
1905 machine__remove_thread(machine, thread);
1906 }
1907 thread__put(thread);
1908 return 0;
1909}
1910
1911int machine__process_event(struct machine *machine, union perf_event *event,
1912 struct perf_sample *sample)
1913{
1914 int ret;
1915
1916 switch (event->header.type) {
1917 case PERF_RECORD_COMM:
1918 ret = machine__process_comm_event(machine, event, sample); break;
1919 case PERF_RECORD_MMAP:
1920 ret = machine__process_mmap_event(machine, event, sample); break;
1921 case PERF_RECORD_NAMESPACES:
1922 ret = machine__process_namespaces_event(machine, event, sample); break;
1923 case PERF_RECORD_CGROUP:
1924 ret = machine__process_cgroup_event(machine, event, sample); break;
1925 case PERF_RECORD_MMAP2:
1926 ret = machine__process_mmap2_event(machine, event, sample); break;
1927 case PERF_RECORD_FORK:
1928 ret = machine__process_fork_event(machine, event, sample); break;
1929 case PERF_RECORD_EXIT:
1930 ret = machine__process_exit_event(machine, event, sample); break;
1931 case PERF_RECORD_LOST:
1932 ret = machine__process_lost_event(machine, event, sample); break;
1933 case PERF_RECORD_AUX:
1934 ret = machine__process_aux_event(machine, event); break;
1935 case PERF_RECORD_ITRACE_START:
1936 ret = machine__process_itrace_start_event(machine, event); break;
1937 case PERF_RECORD_LOST_SAMPLES:
1938 ret = machine__process_lost_samples_event(machine, event, sample); break;
1939 case PERF_RECORD_SWITCH:
1940 case PERF_RECORD_SWITCH_CPU_WIDE:
1941 ret = machine__process_switch_event(machine, event); break;
1942 case PERF_RECORD_KSYMBOL:
1943 ret = machine__process_ksymbol(machine, event, sample); break;
1944 case PERF_RECORD_BPF_EVENT:
1945 ret = machine__process_bpf(machine, event, sample); break;
1946 case PERF_RECORD_TEXT_POKE:
1947 ret = machine__process_text_poke(machine, event, sample); break;
1948 case PERF_RECORD_AUX_OUTPUT_HW_ID:
1949 ret = machine__process_aux_output_hw_id_event(machine, event); break;
1950 default:
1951 ret = -1;
1952 break;
1953 }
1954
1955 return ret;
1956}
1957
1958static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1959{
1960 return regexec(regex, sym->name, 0, NULL, 0) == 0;
1961}
1962
1963static void ip__resolve_ams(struct thread *thread,
1964 struct addr_map_symbol *ams,
1965 u64 ip)
1966{
1967 struct addr_location al;
1968
1969 addr_location__init(&al);
1970 /*
1971 * We cannot use the header.misc hint to determine whether a
1972 * branch stack address is user, kernel, guest, hypervisor.
1973 * Branches may straddle the kernel/user/hypervisor boundaries.
1974 * Thus, we have to try each space consecutively until we find
1975 * a match; otherwise the symbol is unknown.
1976 */
1977 thread__find_cpumode_addr_location(thread, ip, &al);
1978
1979 ams->addr = ip;
1980 ams->al_addr = al.addr;
1981 ams->al_level = al.level;
1982 ams->ms.maps = maps__get(al.maps);
1983 ams->ms.sym = al.sym;
1984 ams->ms.map = map__get(al.map);
1985 ams->phys_addr = 0;
1986 ams->data_page_size = 0;
1987 addr_location__exit(&al);
1988}
1989
1990static void ip__resolve_data(struct thread *thread,
1991 u8 m, struct addr_map_symbol *ams,
1992 u64 addr, u64 phys_addr, u64 daddr_page_size)
1993{
1994 struct addr_location al;
1995
1996 addr_location__init(&al);
1997
1998 thread__find_symbol(thread, m, addr, &al);
1999
2000 ams->addr = addr;
2001 ams->al_addr = al.addr;
2002 ams->al_level = al.level;
2003 ams->ms.maps = maps__get(al.maps);
2004 ams->ms.sym = al.sym;
2005 ams->ms.map = map__get(al.map);
2006 ams->phys_addr = phys_addr;
2007 ams->data_page_size = daddr_page_size;
2008 addr_location__exit(&al);
2009}
2010
2011struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2012 struct addr_location *al)
2013{
2014 struct mem_info *mi = mem_info__new();
2015
2016 if (!mi)
2017 return NULL;
2018
2019 ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
2020 ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
2021 sample->addr, sample->phys_addr,
2022 sample->data_page_size);
2023 mem_info__data_src(mi)->val = sample->data_src;
2024
2025 return mi;
2026}
2027
2028static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2029{
2030 struct map *map = ms->map;
2031 char *srcline = NULL;
2032 struct dso *dso;
2033
2034 if (!map || callchain_param.key == CCKEY_FUNCTION)
2035 return srcline;
2036
2037 dso = map__dso(map);
2038 srcline = srcline__tree_find(dso__srclines(dso), ip);
2039 if (!srcline) {
2040 bool show_sym = false;
2041 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2042
2043 srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2044 ms->sym, show_sym, show_addr, ip);
2045 srcline__tree_insert(dso__srclines(dso), ip, srcline);
2046 }
2047
2048 return srcline;
2049}
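/*
 * Note: srcline results are cached per DSO in a tree keyed by ip, so
 * repeated callchain entries that hit the same address do not re-run
 * the comparatively expensive resolution in get_srcline().
 */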
2050
2051struct iterations {
2052 int nr_loop_iter;
2053 u64 cycles;
2054};
2055
2056static int add_callchain_ip(struct thread *thread,
2057 struct callchain_cursor *cursor,
2058 struct symbol **parent,
2059 struct addr_location *root_al,
2060 u8 *cpumode,
2061 u64 ip,
2062 bool branch,
2063 struct branch_flags *flags,
2064 struct iterations *iter,
2065 u64 branch_from,
2066 bool symbols)
2067{
2068 struct map_symbol ms = {};
2069 struct addr_location al;
2070 int nr_loop_iter = 0, err = 0;
2071 u64 iter_cycles = 0;
2072 const char *srcline = NULL;
2073
2074 addr_location__init(&al);
2075 al.filtered = 0;
2076 al.sym = NULL;
2077 al.srcline = NULL;
2078 if (!cpumode) {
2079 thread__find_cpumode_addr_location(thread, ip, &al);
2080 } else {
2081 if (ip >= PERF_CONTEXT_MAX) {
2082 switch (ip) {
2083 case PERF_CONTEXT_HV:
2084 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2085 break;
2086 case PERF_CONTEXT_KERNEL:
2087 *cpumode = PERF_RECORD_MISC_KERNEL;
2088 break;
2089 case PERF_CONTEXT_USER:
2090 *cpumode = PERF_RECORD_MISC_USER;
2091 break;
2092 default:
2093 pr_debug("invalid callchain context: "
2094 "%"PRId64"\n", (s64) ip);
2095 /*
2096 * It seems the callchain is corrupted.
2097 * Discard all.
2098 */
2099 callchain_cursor_reset(cursor);
2100 err = 1;
2101 goto out;
2102 }
2103 goto out;
2104 }
2105 if (symbols)
2106 thread__find_symbol(thread, *cpumode, ip, &al);
2107 }
2108
2109 if (al.sym != NULL) {
2110 if (perf_hpp_list.parent && !*parent &&
2111 symbol__match_regex(al.sym, &parent_regex))
2112 *parent = al.sym;
2113 else if (have_ignore_callees && root_al &&
2114 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2115 /* Treat this symbol as the root,
2116 forgetting its callees. */
2117 addr_location__copy(root_al, &al);
2118 callchain_cursor_reset(cursor);
2119 }
2120 }
2121
2122 if (symbol_conf.hide_unresolved && al.sym == NULL)
2123 goto out;
2124
2125 if (iter) {
2126 nr_loop_iter = iter->nr_loop_iter;
2127 iter_cycles = iter->cycles;
2128 }
2129
2130 ms.maps = maps__get(al.maps);
2131 ms.map = map__get(al.map);
2132 ms.sym = al.sym;
2133 srcline = callchain_srcline(&ms, al.addr);
2134 err = callchain_cursor_append(cursor, ip, &ms,
2135 branch, flags, nr_loop_iter,
2136 iter_cycles, branch_from, srcline);
2137out:
2138 addr_location__exit(&al);
2139 map_symbol__exit(&ms);
2140 return err;
2141}
2142
2143struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2144 struct addr_location *al)
2145{
2146 unsigned int i;
2147 const struct branch_stack *bs = sample->branch_stack;
2148 struct branch_entry *entries = perf_sample__branch_entries(sample);
2149 u64 *branch_stack_cntr = sample->branch_stack_cntr;
2150 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2151
2152 if (!bi)
2153 return NULL;
2154
2155 for (i = 0; i < bs->nr; i++) {
2156 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2157 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2158 bi[i].flags = entries[i].flags;
2159 if (branch_stack_cntr)
2160 bi[i].branch_stack_cntr = branch_stack_cntr[i];
2161 }
2162 return bi;
2163}
2164
2165static void save_iterations(struct iterations *iter,
2166 struct branch_entry *be, int nr)
2167{
2168 int i;
2169
2170 iter->nr_loop_iter++;
2171 iter->cycles = 0;
2172
2173 for (i = 0; i < nr; i++)
2174 iter->cycles += be[i].flags.cycles;
2175}
2176
2177#define CHASHSZ 127
2178#define CHASHBITS 7
2179#define NO_ENTRY 0xff
2180
2181#define PERF_MAX_BRANCH_DEPTH 127
2182
2183/* Remove loops. */
2184static int remove_loops(struct branch_entry *l, int nr,
2185 struct iterations *iter)
2186{
2187 int i, j, off;
2188 unsigned char chash[CHASHSZ];
2189
2190 memset(chash, NO_ENTRY, sizeof(chash));
2191
2192 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2193
2194 for (i = 0; i < nr; i++) {
2195 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2196
2197 /* no collision handling for now */
2198 if (chash[h] == NO_ENTRY) {
2199 chash[h] = i;
2200 } else if (l[chash[h]].from == l[i].from) {
2201 bool is_loop = true;
2202 /* check if it is a real loop */
2203 off = 0;
2204 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2205 if (l[j].from != l[i + off].from) {
2206 is_loop = false;
2207 break;
2208 }
2209 if (is_loop) {
2210 j = nr - (i + off);
2211 if (j > 0) {
2212 save_iterations(iter + i + off,
2213 l + i, off);
2214
2215 memmove(iter + i, iter + i + off,
2216 j * sizeof(*iter));
2217
2218 memmove(l + i, l + i + off,
2219 j * sizeof(*l));
2220 }
2221
2222 nr -= off;
2223 }
2224 }
2225 }
2226 return nr;
2227}
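/*
 * Worked example for remove_loops(), assuming no hash collisions: for
 * branch "from" addresses
 *
 *   A B C A B C D        (nr = 7)
 *
 * the second A (i == 3) hits the chash slot holding the first A, the
 * window A B C is found to repeat (off == 3), and one iteration is
 * squashed: save_iterations() accumulates the cycle counts of the
 * removed A B C into the iterations entry that ends up on D, and the
 * tail is shifted down, leaving
 *
 *   A B C D              (nr = 4)
 */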
2228
2229static int lbr_callchain_add_kernel_ip(struct thread *thread,
2230 struct callchain_cursor *cursor,
2231 struct perf_sample *sample,
2232 struct symbol **parent,
2233 struct addr_location *root_al,
2234 u64 branch_from,
2235 bool callee, int end,
2236 bool symbols)
2237{
2238 struct ip_callchain *chain = sample->callchain;
2239 u8 cpumode = PERF_RECORD_MISC_USER;
2240 int err, i;
2241
2242 if (callee) {
2243 for (i = 0; i < end + 1; i++) {
2244 err = add_callchain_ip(thread, cursor, parent,
2245 root_al, &cpumode, chain->ips[i],
2246 false, NULL, NULL, branch_from,
2247 symbols);
2248 if (err)
2249 return err;
2250 }
2251 return 0;
2252 }
2253
2254 for (i = end; i >= 0; i--) {
2255 err = add_callchain_ip(thread, cursor, parent,
2256 root_al, &cpumode, chain->ips[i],
2257 false, NULL, NULL, branch_from,
2258 symbols);
2259 if (err)
2260 return err;
2261 }
2262
2263 return 0;
2264}
2265
2266static void save_lbr_cursor_node(struct thread *thread,
2267 struct callchain_cursor *cursor,
2268 int idx)
2269{
2270 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2271
2272 if (!lbr_stitch)
2273 return;
2274
2275 if (cursor->pos == cursor->nr) {
2276 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2277 return;
2278 }
2279
2280 if (!cursor->curr)
2281 cursor->curr = cursor->first;
2282 else
2283 cursor->curr = cursor->curr->next;
2284
2285 map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
2286 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2287 sizeof(struct callchain_cursor_node));
2288 lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
2289 lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
2290
2291 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2292 cursor->pos++;
2293}
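/*
 * Note on the memcpy() above: it copies the cursor node including its
 * ms.maps/ms.map pointers, so fresh references are taken right after
 * the copy to make the saved prev_lbr_cursor entry own its map_symbol
 * independently of the cursor node it was copied from.
 */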
2294
2295static int lbr_callchain_add_lbr_ip(struct thread *thread,
2296 struct callchain_cursor *cursor,
2297 struct perf_sample *sample,
2298 struct symbol **parent,
2299 struct addr_location *root_al,
2300 u64 *branch_from,
2301 bool callee,
2302 bool symbols)
2303{
2304 struct branch_stack *lbr_stack = sample->branch_stack;
2305 struct branch_entry *entries = perf_sample__branch_entries(sample);
2306 u8 cpumode = PERF_RECORD_MISC_USER;
2307 int lbr_nr = lbr_stack->nr;
2308 struct branch_flags *flags;
2309 int err, i;
2310 u64 ip;
2311
2312 /*
2313 * curr and pos are not used during a writing session; they are
2314 * cleared in callchain_cursor_commit() when the writing session is
2315 * closed. Use curr and pos here to track the current cursor node.
2316 */
2317 if (thread__lbr_stitch(thread)) {
2318 cursor->curr = NULL;
2319 cursor->pos = cursor->nr;
2320 if (cursor->nr) {
2321 cursor->curr = cursor->first;
2322 for (i = 0; i < (int)(cursor->nr - 1); i++)
2323 cursor->curr = cursor->curr->next;
2324 }
2325 }
2326
2327 if (callee) {
2328 /* Add LBR ip from first entries.to */
2329 ip = entries[0].to;
2330 flags = &entries[0].flags;
2331 *branch_from = entries[0].from;
2332 err = add_callchain_ip(thread, cursor, parent,
2333 root_al, &cpumode, ip,
2334 true, flags, NULL,
2335 *branch_from, symbols);
2336 if (err)
2337 return err;
2338
2339 /*
2340 * The number of cursor nodes has increased, so advance the
2341 * current cursor node. There is no need to save the cursor
2342 * node for entry 0, since it is impossible to stitch the
2343 * whole LBR stack of the previous sample.
2344 */
2345 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2346 if (!cursor->curr)
2347 cursor->curr = cursor->first;
2348 else
2349 cursor->curr = cursor->curr->next;
2350 cursor->pos++;
2351 }
2352
2353 /* Add LBR ip from entries.from one by one. */
2354 for (i = 0; i < lbr_nr; i++) {
2355 ip = entries[i].from;
2356 flags = &entries[i].flags;
2357 err = add_callchain_ip(thread, cursor, parent,
2358 root_al, &cpumode, ip,
2359 true, flags, NULL,
2360 *branch_from, symbols);
2361 if (err)
2362 return err;
2363 save_lbr_cursor_node(thread, cursor, i);
2364 }
2365 return 0;
2366 }
2367
2368 /* Add LBR ip from entries.from one by one. */
2369 for (i = lbr_nr - 1; i >= 0; i--) {
2370 ip = entries[i].from;
2371 flags = &entries[i].flags;
2372 err = add_callchain_ip(thread, cursor, parent,
2373 root_al, &cpumode, ip,
2374 true, flags, NULL,
2375 *branch_from, symbols);
2376 if (err)
2377 return err;
2378 save_lbr_cursor_node(thread, cursor, i);
2379 }
2380
2381 if (lbr_nr > 0) {
2382 /* Add LBR ip from first entries.to */
2383 ip = entries[0].to;
2384 flags = &entries[0].flags;
2385 *branch_from = entries[0].from;
2386 err = add_callchain_ip(thread, cursor, parent,
2387 root_al, &cpumode, ip,
2388 true, flags, NULL,
2389 *branch_from, symbols);
2390 if (err)
2391 return err;
2392 }
2393
2394 return 0;
2395}
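/*
 * Shape of the chain built above (a sketch; LBR entries are ordered
 * newest-first): in callee order the cursor receives entries[0].to
 * (where the most recent branch landed) followed by
 * entries[0..nr-1].from (the outstanding return addresses, leaf
 * towards root); in caller order the same addresses are emitted in
 * reverse, with entries[0].to appended last.
 */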
2396
2397static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2398 struct callchain_cursor *cursor)
2399{
2400 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2401 struct callchain_cursor_node *cnode;
2402 struct stitch_list *stitch_node;
2403 int err;
2404
2405 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2406 cnode = &stitch_node->cursor;
2407
2408 err = callchain_cursor_append(cursor, cnode->ip,
2409 &cnode->ms,
2410 cnode->branch,
2411 &cnode->branch_flags,
2412 cnode->nr_loop_iter,
2413 cnode->iter_cycles,
2414 cnode->branch_from,
2415 cnode->srcline);
2416 if (err)
2417 return err;
2418 }
2419 return 0;
2420}
2421
2422static struct stitch_list *get_stitch_node(struct thread *thread)
2423{
2424 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2425 struct stitch_list *stitch_node;
2426
2427 if (!list_empty(&lbr_stitch->free_lists)) {
2428 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2429 struct stitch_list, node);
2430 list_del(&stitch_node->node);
2431
2432 return stitch_node;
2433 }
2434
2435 return malloc(sizeof(struct stitch_list));
2436}
2437
2438static bool has_stitched_lbr(struct thread *thread,
2439 struct perf_sample *cur,
2440 struct perf_sample *prev,
2441 unsigned int max_lbr,
2442 bool callee)
2443{
2444 struct branch_stack *cur_stack = cur->branch_stack;
2445 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2446 struct branch_stack *prev_stack = prev->branch_stack;
2447 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2448 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2449 int i, j, nr_identical_branches = 0;
2450 struct stitch_list *stitch_node;
2451 u64 cur_base, distance;
2452
2453 if (!cur_stack || !prev_stack)
2454 return false;
2455
2456 /* Find the physical index of the base-of-stack for current sample. */
2457 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2458
2459 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2460 (max_lbr + prev_stack->hw_idx - cur_base);
2461 /* Previous sample has shorter stack. Nothing can be stitched. */
2462 if (distance + 1 > prev_stack->nr)
2463 return false;
2464
2465 /*
2466 * Check if there are identical LBRs between the two samples.
2467 * Identical LBRs must have the same from, to and flags values.
2468 * Also, they have to be saved in the same LBR registers (same
2469 * physical index).
2470 *
2471 * Start from the base-of-stack of the current sample.
2472 */
2473 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2474 if ((prev_entries[i].from != cur_entries[j].from) ||
2475 (prev_entries[i].to != cur_entries[j].to) ||
2476 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2477 break;
2478 nr_identical_branches++;
2479 }
2480
2481 if (!nr_identical_branches)
2482 return false;
2483
2484 /*
2485 * Save the LBRs between the base-of-stack of previous sample
2486 * and the base-of-stack of current sample into lbr_stitch->lists.
2487 * These LBRs will be stitched later.
2488 */
2489 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2490
2491 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2492 continue;
2493
2494 stitch_node = get_stitch_node(thread);
2495 if (!stitch_node)
2496 return false;
2497
2498 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2499 sizeof(struct callchain_cursor_node));
2500
2501 stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
2502 stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
2503
2504 if (callee)
2505 list_add(&stitch_node->node, &lbr_stitch->lists);
2506 else
2507 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2508 }
2509
2510 return true;
2511}
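/*
 * Rough picture of the stitching test above: hw_idx is the physical
 * ring position of the newest LBR entry, cur_base reconstructs the
 * ring position just below the oldest entry of the current sample,
 * and distance is how deep the previous sample's newest entry lies
 * inside the current stack, modulo the ring size. Entries are then
 * compared pairwise from the base of the current stack, and only if
 * some match can the older, non-overlapping part of the previous
 * stack be stitched on.
 */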
2512
2513static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2514{
2515 if (thread__lbr_stitch(thread))
2516 return true;
2517
2518 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2519 if (!thread__lbr_stitch(thread))
2520 goto err;
2521
2522 thread__lbr_stitch(thread)->prev_lbr_cursor =
2523 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2524 if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2525 goto free_lbr_stitch;
2526
2527 thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
2528
2529 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2530 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2531
2532 return true;
2533
2534free_lbr_stitch:
2535 free(thread__lbr_stitch(thread));
2536 thread__set_lbr_stitch(thread, NULL);
2537err:
2538 pr_warning("Failed to allocate space for stitched LBRs. Disabling LBR stitching\n");
2539 thread__set_lbr_stitch_enable(thread, false);
2540 return false;
2541}
2542
2543/*
2544 * Resolve an LBR callstack chain sample.
2545 * Return:
2546 * 1 on success: LBR callchain information was resolved
2547 * 0: no LBR callchain information available, the caller should try fp
2548 * negative error code on other errors.
2549 */
2550static int resolve_lbr_callchain_sample(struct thread *thread,
2551 struct callchain_cursor *cursor,
2552 struct perf_sample *sample,
2553 struct symbol **parent,
2554 struct addr_location *root_al,
2555 int max_stack,
2556 unsigned int max_lbr,
2557 bool symbols)
2558{
2559 bool callee = (callchain_param.order == ORDER_CALLEE);
2560 struct ip_callchain *chain = sample->callchain;
2561 int chain_nr = min(max_stack, (int)chain->nr), i;
2562 struct lbr_stitch *lbr_stitch;
2563 bool stitched_lbr = false;
2564 u64 branch_from = 0;
2565 int err;
2566
2567 for (i = 0; i < chain_nr; i++) {
2568 if (chain->ips[i] == PERF_CONTEXT_USER)
2569 break;
2570 }
2571
2572 /* LBR only affects the user callchain */
2573 if (i == chain_nr)
2574 return 0;
2575
2576 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2577 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2578 lbr_stitch = thread__lbr_stitch(thread);
2579
2580 stitched_lbr = has_stitched_lbr(thread, sample,
2581 &lbr_stitch->prev_sample,
2582 max_lbr, callee);
2583
2584 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2585 struct stitch_list *stitch_node;
2586
2587 list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
2588 map_symbol__exit(&stitch_node->cursor.ms);
2589
2590 list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
2591 }
2592 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2593 }
2594
2595 if (callee) {
2596 /* Add kernel ip */
2597 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2598 parent, root_al, branch_from,
2599 true, i, symbols);
2600 if (err)
2601 goto error;
2602
2603 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2604 root_al, &branch_from, true, symbols);
2605 if (err)
2606 goto error;
2607
2608 if (stitched_lbr) {
2609 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2610 if (err)
2611 goto error;
2612 }
2613
2614 } else {
2615 if (stitched_lbr) {
2616 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2617 if (err)
2618 goto error;
2619 }
2620 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2621 root_al, &branch_from, false, symbols);
2622 if (err)
2623 goto error;
2624
2625 /* Add kernel ip */
2626 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2627 parent, root_al, branch_from,
2628 false, i, symbols);
2629 if (err)
2630 goto error;
2631 }
2632 return 1;
2633
2634error:
2635 return (err < 0) ? err : 0;
2636}
2637
2638static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2639 struct callchain_cursor *cursor,
2640 struct symbol **parent,
2641 struct addr_location *root_al,
2642 u8 *cpumode, int ent, bool symbols)
2643{
2644 int err = 0;
2645
2646 while (--ent >= 0) {
2647 u64 ip = chain->ips[ent];
2648
2649 if (ip >= PERF_CONTEXT_MAX) {
2650 err = add_callchain_ip(thread, cursor, parent,
2651 root_al, cpumode, ip,
2652 false, NULL, NULL, 0, symbols);
2653 break;
2654 }
2655 }
2656 return err;
2657}
2658
2659static u64 get_leaf_frame_caller(struct perf_sample *sample,
2660 struct thread *thread, int usr_idx)
2661{
2662 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2663 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2664 else
2665 return 0;
2666}
2667
2668static int thread__resolve_callchain_sample(struct thread *thread,
2669 struct callchain_cursor *cursor,
2670 struct evsel *evsel,
2671 struct perf_sample *sample,
2672 struct symbol **parent,
2673 struct addr_location *root_al,
2674 int max_stack,
2675 bool symbols)
2676{
2677 struct branch_stack *branch = sample->branch_stack;
2678 struct branch_entry *entries = perf_sample__branch_entries(sample);
2679 struct ip_callchain *chain = sample->callchain;
2680 int chain_nr = 0;
2681 u8 cpumode = PERF_RECORD_MISC_USER;
2682 int i, j, err, nr_entries, usr_idx;
2683 int skip_idx = -1;
2684 int first_call = 0;
2685 u64 leaf_frame_caller;
2686
2687 if (chain)
2688 chain_nr = chain->nr;
2689
2690 if (evsel__has_branch_callstack(evsel)) {
2691 struct perf_env *env = evsel__env(evsel);
2692
2693 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2694 root_al, max_stack,
2695 !env ? 0 : env->max_branches,
2696 symbols);
2697 if (err)
2698 return (err < 0) ? err : 0;
2699 }
2700
2701 /*
2702 * Based on DWARF debug information, some architectures skip
2703 * a callchain entry saved by the kernel.
2704 */
2705 skip_idx = arch_skip_callchain_idx(thread, chain);
2706
2707 /*
2708 * Add branches to call stack for easier browsing. This gives
2709 * more context for a sample than just the callers.
2710 *
2711 * This uses individual histograms of paths compared to the
2712 * aggregated histograms the normal LBR mode uses.
2713 *
2714 * Limitations for now:
2715 * - No extra filters
2716 * - No annotations (should annotate somehow)
2717 */
2718
2719 if (branch && callchain_param.branch_callstack) {
2720 int nr = min(max_stack, (int)branch->nr);
2721 struct branch_entry be[nr];
2722 struct iterations iter[nr];
2723
2724 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2725 pr_warning("corrupted branch chain. skipping...\n");
2726 goto check_calls;
2727 }
2728
2729 for (i = 0; i < nr; i++) {
2730 if (callchain_param.order == ORDER_CALLEE) {
2731 be[i] = entries[i];
2732
2733 if (chain == NULL)
2734 continue;
2735
2736 /*
2737 * Check for overlap into the callchain.
2738 * The return address is one off compared to
2739 * the branch entry. To adjust for this,
2740 * assume the calling instruction is no longer
2741 * than 8 bytes.
2742 */
2743 if (i == skip_idx ||
2744 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2745 first_call++;
2746 else if (be[i].from < chain->ips[first_call] &&
2747 be[i].from >= chain->ips[first_call] - 8)
2748 first_call++;
2749 } else
2750 be[i] = entries[branch->nr - i - 1];
2751 }
2752
2753 memset(iter, 0, sizeof(struct iterations) * nr);
2754 nr = remove_loops(be, nr, iter);
2755
2756 for (i = 0; i < nr; i++) {
2757 err = add_callchain_ip(thread, cursor, parent,
2758 root_al,
2759 NULL, be[i].to,
2760 true, &be[i].flags,
2761 NULL, be[i].from, symbols);
2762
2763 if (!err) {
2764 err = add_callchain_ip(thread, cursor, parent, root_al,
2765 NULL, be[i].from,
2766 true, &be[i].flags,
2767 &iter[i], 0, symbols);
2768 }
2769 if (err == -EINVAL)
2770 break;
2771 if (err)
2772 return err;
2773 }
2774
2775 if (chain_nr == 0)
2776 return 0;
2777
2778 chain_nr -= nr;
2779 }
2780
2781check_calls:
2782 if (chain && callchain_param.order != ORDER_CALLEE) {
2783 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2784 &cpumode, chain->nr - first_call, symbols);
2785 if (err)
2786 return (err < 0) ? err : 0;
2787 }
2788 for (i = first_call, nr_entries = 0;
2789 i < chain_nr && nr_entries < max_stack; i++) {
2790 u64 ip;
2791
2792 if (callchain_param.order == ORDER_CALLEE)
2793 j = i;
2794 else
2795 j = chain->nr - i - 1;
2796
2797#ifdef HAVE_SKIP_CALLCHAIN_IDX
2798 if (j == skip_idx)
2799 continue;
2800#endif
2801 ip = chain->ips[j];
2802 if (ip < PERF_CONTEXT_MAX)
2803 ++nr_entries;
2804 else if (callchain_param.order != ORDER_CALLEE) {
2805 err = find_prev_cpumode(chain, thread, cursor, parent,
2806 root_al, &cpumode, j, symbols);
2807 if (err)
2808 return (err < 0) ? err : 0;
2809 continue;
2810 }
2811
2812 /*
2813 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
2814 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
2815 * the index will be different in order to add the missing frame
2816 * at the right place.
2817 */
2818
2819 usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
2820
2821 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
2822
2823 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
2824
2825 /*
2826 * Check that leaf_frame_caller != ip so as not to add the same
2827 * value twice.
2828 */
2829
2830 if (leaf_frame_caller && leaf_frame_caller != ip) {
2831
2832 err = add_callchain_ip(thread, cursor, parent,
2833 root_al, &cpumode, leaf_frame_caller,
2834 false, NULL, NULL, 0, symbols);
2835 if (err)
2836 return (err < 0) ? err : 0;
2837 }
2838 }
2839
2840 err = add_callchain_ip(thread, cursor, parent,
2841 root_al, &cpumode, ip,
2842 false, NULL, NULL, 0, symbols);
2843
2844 if (err)
2845 return (err < 0) ? err : 0;
2846 }
2847
2848 return 0;
2849}
2850
2851static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2852{
2853 struct symbol *sym = ms->sym;
2854 struct map *map = ms->map;
2855 struct inline_node *inline_node;
2856 struct inline_list *ilist;
2857 struct dso *dso;
2858 u64 addr;
2859 int ret = 1;
2860 struct map_symbol ilist_ms;
2861
2862 if (!symbol_conf.inline_name || !map || !sym)
2863 return ret;
2864
2865 addr = map__dso_map_ip(map, ip);
2866 addr = map__rip_2objdump(map, addr);
2867 dso = map__dso(map);
2868
2869 inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
2870 if (!inline_node) {
2871 inline_node = dso__parse_addr_inlines(dso, addr, sym);
2872 if (!inline_node)
2873 return ret;
2874 inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
2875 }
2876
2877 ilist_ms = (struct map_symbol) {
2878 .maps = maps__get(ms->maps),
2879 .map = map__get(map),
2880 };
2881 list_for_each_entry(ilist, &inline_node->val, list) {
2882 ilist_ms.sym = ilist->symbol;
2883 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2884 NULL, 0, 0, 0, ilist->srcline);
2885
2886 if (ret != 0)
2887 break; /* still drop the ilist_ms references below */
2888 }
2889 map_symbol__exit(&ilist_ms);
2890
2891 return ret;
2892}
2893
2894static int unwind_entry(struct unwind_entry *entry, void *arg)
2895{
2896 struct callchain_cursor *cursor = arg;
2897 const char *srcline = NULL;
2898 u64 addr = entry->ip;
2899
2900 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2901 return 0;
2902
2903 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2904 return 0;
2905
2906 /*
2907 * Convert entry->ip from a virtual address to an offset in
2908 * its corresponding binary.
2909 */
2910 if (entry->ms.map)
2911 addr = map__dso_map_ip(entry->ms.map, entry->ip);
2912
2913 srcline = callchain_srcline(&entry->ms, addr);
2914 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2915 false, NULL, 0, 0, 0, srcline);
2916}
2917
2918static int thread__resolve_callchain_unwind(struct thread *thread,
2919 struct callchain_cursor *cursor,
2920 struct evsel *evsel,
2921 struct perf_sample *sample,
2922 int max_stack, bool symbols)
2923{
2924 /* Can we do dwarf post unwind? */
2925 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2926 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2927 return 0;
2928
2929 /* Bail out if nothing was captured. */
2930 if ((!sample->user_regs.regs) ||
2931 (!sample->user_stack.size))
2932 return 0;
2933
2934 if (!symbols)
2935 pr_debug("Not resolving symbols with an unwinder isn't currently supported\n");
2936
2937 return unwind__get_entries(unwind_entry, cursor,
2938 thread, sample, max_stack, false);
2939}
2940
2941int __thread__resolve_callchain(struct thread *thread,
2942 struct callchain_cursor *cursor,
2943 struct evsel *evsel,
2944 struct perf_sample *sample,
2945 struct symbol **parent,
2946 struct addr_location *root_al,
2947 int max_stack,
2948 bool symbols)
2949{
2950 int ret = 0;
2951
2952 if (cursor == NULL)
2953 return -ENOMEM;
2954
2955 callchain_cursor_reset(cursor);
2956
2957 if (callchain_param.order == ORDER_CALLEE) {
2958 ret = thread__resolve_callchain_sample(thread, cursor,
2959 evsel, sample,
2960 parent, root_al,
2961 max_stack, symbols);
2962 if (ret)
2963 return ret;
2964 ret = thread__resolve_callchain_unwind(thread, cursor,
2965 evsel, sample,
2966 max_stack, symbols);
2967 } else {
2968 ret = thread__resolve_callchain_unwind(thread, cursor,
2969 evsel, sample,
2970 max_stack, symbols);
2971 if (ret)
2972 return ret;
2973 ret = thread__resolve_callchain_sample(thread, cursor,
2974 evsel, sample,
2975 parent, root_al,
2976 max_stack, symbols);
2977 }
2978
2979 return ret;
2980}
2981
2982int machine__for_each_thread(struct machine *machine,
2983 int (*fn)(struct thread *thread, void *p),
2984 void *priv)
2985{
2986 return threads__for_each_thread(&machine->threads, fn, priv);
2987}
2988
2989int machines__for_each_thread(struct machines *machines,
2990 int (*fn)(struct thread *thread, void *p),
2991 void *priv)
2992{
2993 struct rb_node *nd;
2994 int rc = 0;
2995
2996 rc = machine__for_each_thread(&machines->host, fn, priv);
2997 if (rc != 0)
2998 return rc;
2999
3000 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3001 struct machine *machine = rb_entry(nd, struct machine, rb_node);
3002
3003 rc = machine__for_each_thread(machine, fn, priv);
3004 if (rc != 0)
3005 return rc;
3006 }
3007 return rc;
3008}
3009
3011static int thread_list_cb(struct thread *thread, void *data)
3012{
3013 struct list_head *list = data;
3014 struct thread_list *entry = malloc(sizeof(*entry));
3015
3016 if (!entry)
3017 return -ENOMEM;
3018
3019 entry->thread = thread__get(thread);
3020 list_add_tail(&entry->list, list);
3021 return 0;
3022}
3023
3024int machine__thread_list(struct machine *machine, struct list_head *list)
3025{
3026 return machine__for_each_thread(machine, thread_list_cb, list);
3027}
3028
3029void thread_list__delete(struct list_head *list)
3030{
3031 struct thread_list *pos, *next;
3032
3033 list_for_each_entry_safe(pos, next, list, list) {
3034 thread__zput(pos->thread);
3035 list_del(&pos->list);
3036 free(pos);
3037 }
3038}
3039
3040pid_t machine__get_current_tid(struct machine *machine, int cpu)
3041{
3042 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3043 return -1;
3044
3045 return machine->current_tid[cpu];
3046}
3047
3048int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3049 pid_t tid)
3050{
3051 struct thread *thread;
3052 const pid_t init_val = -1;
3053
3054 if (cpu < 0)
3055 return -EINVAL;
3056
3057 if (realloc_array_as_needed(machine->current_tid,
3058 machine->current_tid_sz,
3059 (unsigned int)cpu,
3060 &init_val))
3061 return -ENOMEM;
3062
3063 machine->current_tid[cpu] = tid;
3064
3065 thread = machine__findnew_thread(machine, pid, tid);
3066 if (!thread)
3067 return -ENOMEM;
3068
3069 thread__set_cpu(thread, cpu);
3070 thread__put(thread);
3071
3072 return 0;
3073}
3074
3075/*
3076 * Compares the raw arch string. N.B. see instead perf_env__arch() or
3077 * machine__normalized_is() if a normalized arch is needed.
3078 */
3079bool machine__is(struct machine *machine, const char *arch)
3080{
3081 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3082}
3083
3084bool machine__normalized_is(struct machine *machine, const char *arch)
3085{
3086 return machine && !strcmp(perf_env__arch(machine->env), arch);
3087}
3088
3089int machine__nr_cpus_avail(struct machine *machine)
3090{
3091 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3092}
3093
3094int machine__get_kernel_start(struct machine *machine)
3095{
3096 struct map *map = machine__kernel_map(machine);
3097 int err = 0;
3098
3099 /*
3100 * The only addresses above 2^63 are kernel addresses of a 64-bit
3101 * kernel. Note that addresses are unsigned so that on a 32-bit system
3102 * all addresses including kernel addresses are less than 2^32. In
3103 * that case (32-bit system), if the kernel mapping is unknown, all
3104 * addresses will be assumed to be in user space - see
3105 * machine__kernel_ip().
3106 */
3107 machine->kernel_start = 1ULL << 63;
3108 if (map) {
3109 err = map__load(map);
3110 /*
3111 * On x86_64, PTI entry trampolines are less than the
3112 * start of kernel text, but still above 2^63. So leave
3113 * kernel_start = 1ULL << 63 for x86_64.
3114 */
3115 if (!err && !machine__is(machine, "x86_64"))
3116 machine->kernel_start = map__start(map);
3117 }
3118 return err;
3119}
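/*
 * With kernel_start == 1ULL << 63, machine__kernel_ip() classifies,
 * for example, 0xffffffff81000000 (a typical x86_64 kernel text
 * address) as kernel space and 0x00007f0000000000 (a user-space
 * address) as user space.
 */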
3120
3121u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3122{
3123 u8 addr_cpumode = cpumode;
3124 bool kernel_ip;
3125
3126 if (!machine->single_address_space)
3127 goto out;
3128
3129 kernel_ip = machine__kernel_ip(machine, addr);
3130 switch (cpumode) {
3131 case PERF_RECORD_MISC_KERNEL:
3132 case PERF_RECORD_MISC_USER:
3133 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3134 PERF_RECORD_MISC_USER;
3135 break;
3136 case PERF_RECORD_MISC_GUEST_KERNEL:
3137 case PERF_RECORD_MISC_GUEST_USER:
3138 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3139 PERF_RECORD_MISC_GUEST_USER;
3140 break;
3141 default:
3142 break;
3143 }
3144out:
3145 return addr_cpumode;
3146}
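/*
 * In other words: on a machine with a single address space, the
 * recorded cpumode cannot be trusted to match the sampled address, so
 * a USER/KERNEL (or GUEST_USER/GUEST_KERNEL) cpumode is corrected here
 * according to machine__kernel_ip(addr), while the host/guest
 * distinction is preserved.
 */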
3147
3148struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
3149 const struct dso_id *id)
3150{
3151 return dsos__findnew_id(&machine->dsos, filename, id);
3152}
3153
3154struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3155{
3156 return machine__findnew_dso_id(machine, filename, NULL);
3157}
3158
3159char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3160{
3161 struct machine *machine = vmachine;
3162 struct map *map;
3163 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3164
3165 if (sym == NULL)
3166 return NULL;
3167
3168 *modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
3169 *addrp = map__unmap_ip(map, sym->start);
3170 return sym->name;
3171}
3172
3173struct machine__for_each_dso_cb_args {
3174 struct machine *machine;
3175 machine__dso_t fn;
3176 void *priv;
3177};
3178
3179static int machine__for_each_dso_cb(struct dso *dso, void *data)
3180{
3181 struct machine__for_each_dso_cb_args *args = data;
3182
3183 return args->fn(dso, args->machine, args->priv);
3184}
3185
3186int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3187{
3188 struct machine__for_each_dso_cb_args args = {
3189 .machine = machine,
3190 .fn = fn,
3191 .priv = priv,
3192 };
3193
3194 return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
3195}
3196
3197int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3198{
3199 struct maps *maps = machine__kernel_maps(machine);
3200
3201 return maps__for_each_map(maps, fn, priv);
3202}
3203
3204bool machine__is_lock_function(struct machine *machine, u64 addr)
3205{
3206 if (!machine->sched.text_start) {
3207 struct map *kmap;
3208 struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3209
3210 if (!sym) {
3211 /* to avoid retry */
3212 machine->sched.text_start = 1;
3213 return false;
3214 }
3215
3216 machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3217
3218 /* should not fail from here */
3219 sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3220 machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3221
3222 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3223 machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3224
3225 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3226 machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3227
3228 sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
3229 if (sym) {
3230 machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
3231 machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
3232 }
3233 sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
3234 if (sym) {
3235 machine->trace.text_start = map__unmap_ip(kmap, sym->start);
3236 machine->trace.text_end = map__unmap_ip(kmap, sym->end);
3237 }
3238 }
3239
3240 /* failed to get kernel symbols */
3241 if (machine->sched.text_start == 1)
3242 return false;
3243
3244 /* mutex and rwsem functions are in sched text section */
3245 if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3246 return true;
3247
3248 /* spinlock functions are in lock text section */
3249 if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3250 return true;
3251
3252 /* traceiter functions currently don't have their own section
3253 * but we consider them lock functions
3254 */
3255 if (machine->traceiter.text_start != 0) {
3256 if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
3257 return true;
3258 }
3259
3260 if (machine->trace.text_start != 0) {
3261 if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
3262 return true;
3263 }
3264
3265 return false;
3266}
3267
3268int machine__hit_all_dsos(struct machine *machine)
3269{
3270 return dsos__hit_all(&machine->dsos);
3271}