// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
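
/*
 * Illustrative usage sketch (not compiled, and an assumption about how
 * callers typically use this API rather than code from this file):
 */
#if 0
static int example_host_machine(void)
{
	struct machine *machine = machine__new_host();

	if (machine == NULL)
		return -ENOMEM;

	/* ... resolve events/samples against 'machine' here ... */

	machine__delete(machine);	/* machine__exit() + free() */
	return 0;
}
#endif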

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
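
/*
 * Example (an illustrative assumption about how the default guest is
 * registered, not taken from this file): if a "default guest" machine
 * was added with pid 0 and another guest with pid 1234, a lookup such
 * as machines__find(machines, 5678) finds no exact match, but the
 * pid-0 machine seen while walking down the tree is returned as the
 * fallback.
 */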

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}
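
/*
 * Illustrative usage sketch (not compiled; an assumed caller, not code
 * from this file): lookups return a reference which must be dropped
 * with thread__put(), per the comment on ____machine__findnew_thread().
 */
#if 0
static void example_thread_lookup(struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__find_thread(machine, pid, tid);

	if (thread == NULL)	/* not seen yet, so create it */
		thread = machine__findnew_thread(machine, pid, tid);

	if (thread != NULL) {
		/* ... use 'thread' ... */
		thread__put(thread);	/* drop the reference we were given */
	}
}
#endif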

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool.\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel.\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}
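
/*
 * Example (hypothetical module name, for illustration): a module map
 * created from /proc/modules may start out with a synthetic long_name
 * such as "[test_module]"; when a later event supplies a real path like
 * "/lib/modules/.../test_module.ko", the helper above swaps it in so
 * that dso__load() can locate the file on disk.
 */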

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}
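
/*
 * Example (illustrative kallsyms content, not real addresses): given a
 * /proc/kallsyms containing
 *
 *	ffffffff81000000 T _text
 *	ffffffff81000000 T _stext
 *
 * the loop above stops at the first name that resolves, so *symbol_name
 * is set to "_text" and *start to 0xffffffff81000000.
 */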

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;

	/* In case of renewing the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(0, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
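
/*
 * Example (illustrative /proc/version content): given a line such as
 *
 *	Linux version 4.15.0-generic (gcc version 7.3.0) #1 SMP ...
 *
 * get_kernel_version() returns the token after the prefix and before the
 * next space, i.e. "4.15.0-generic".
 */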

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so we may need to
	 * update the symtab_type.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return a bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = start;
		machine->vmlinux_maps[i]->end = end;

		/*
		 * Be a bit paranoid here: some perf.data files come with a
		 * zero-sized synthesized MMAP event for the kernel.
		 */
		if (start == 0 && end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}

		/* we have a real start address now, so re-order the kmaps */
		map = machine__kernel_map(machine);

		map__get(map);
		map_groups__remove(&machine->kmaps, map);

		/* assume it's the last in the kmaps */
		machine__set_kernel_mmap(machine, addr, ~0ULL);

		map_groups__insert(&machine->kmaps, map);
		map__put(map);
	}

	/* update end address of the kernel map using adjacent module address */
	map = map__next(machine__kernel_map(machine));
	if (map)
		machine__set_kernel_mmap(machine, addr, map->start);

	return 0;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module() is not the
			 * cpumode of *this* event. If we insisted on passing
			 * the correct cpumode to is_kernel_module(), we would
			 * have to record the cpumode when adding this dso to
			 * the linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap(machine, event->mmap.start,
					 event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * Some shared data regions have the execute bit set, which
		 * puts their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
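
/*
 * Worked example (hypothetical 'from' addresses, for illustration):
 * for the from-sequence A B C A B C D, the second A collides with the
 * first in chash, the window A B C repeats, so one iteration is
 * collapsed: the entries become A B C D, nr shrinks by 3 and the
 * iteration count and accumulated cycles are recorded in 'iter' for
 * the entry that follows the collapsed loop.
 */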

/*
 * Resolve LBR callstack chain sample
 * Return:
 *   1 on success, i.e. LBR callchain information is available
 *   0 when no LBR callchain information is available; the caller
 *     should fall back to the fp-based callchain
 *   negative error code on other errors.
 */
1907static int resolve_lbr_callchain_sample(struct thread *thread,
1908 struct callchain_cursor *cursor,
1909 struct perf_sample *sample,
1910 struct symbol **parent,
1911 struct addr_location *root_al,
1912 int max_stack)
1913{
1914 struct ip_callchain *chain = sample->callchain;
1915 int chain_nr = min(max_stack, (int)chain->nr), i;
1916 u8 cpumode = PERF_RECORD_MISC_USER;
1917 u64 ip, branch_from = 0;
1918
1919 for (i = 0; i < chain_nr; i++) {
1920 if (chain->ips[i] == PERF_CONTEXT_USER)
1921 break;
1922 }
1923
1924 /* LBR only affects the user callchain */
1925 if (i != chain_nr) {
1926 struct branch_stack *lbr_stack = sample->branch_stack;
1927 int lbr_nr = lbr_stack->nr, j, k;
1928 bool branch;
1929 struct branch_flags *flags;
1930 /*
1931 * LBR callstack can only get user call chain.
1932 * The mix_chain_nr is kernel call chain
1933 * number plus LBR user call chain number.
1934 * i is kernel call chain number,
1935 * 1 is PERF_CONTEXT_USER,
1936 * lbr_nr + 1 is the user call chain number.
1937 * For details, please refer to the comments
1938 * in callchain__printf
1939 */
1940 int mix_chain_nr = i + 1 + lbr_nr + 1;
1941
1942 for (j = 0; j < mix_chain_nr; j++) {
1943 int err;
1944 branch = false;
1945 flags = NULL;
1946
1947 if (callchain_param.order == ORDER_CALLEE) {
1948 if (j < i + 1)
1949 ip = chain->ips[j];
1950 else if (j > i + 1) {
1951 k = j - i - 2;
1952 ip = lbr_stack->entries[k].from;
1953 branch = true;
1954 flags = &lbr_stack->entries[k].flags;
1955 } else {
1956 ip = lbr_stack->entries[0].to;
1957 branch = true;
1958 flags = &lbr_stack->entries[0].flags;
1959 branch_from =
1960 lbr_stack->entries[0].from;
1961 }
1962 } else {
1963 if (j < lbr_nr) {
1964 k = lbr_nr - j - 1;
1965 ip = lbr_stack->entries[k].from;
1966 branch = true;
1967 flags = &lbr_stack->entries[k].flags;
1968 }
1969 else if (j > lbr_nr)
1970 ip = chain->ips[i + 1 - (j - lbr_nr)];
1971 else {
1972 ip = lbr_stack->entries[0].to;
1973 branch = true;
1974 flags = &lbr_stack->entries[0].flags;
1975 branch_from =
1976 lbr_stack->entries[0].from;
1977 }
1978 }
1979
1980 err = add_callchain_ip(thread, cursor, parent,
1981 root_al, &cpumode, ip,
1982 branch, flags, NULL,
1983 branch_from);
1984 if (err)
1985 return (err < 0) ? err : 0;
1986 }
1987 return 1;
1988 }
1989
1990 return 0;
1991}
1992
1993static int thread__resolve_callchain_sample(struct thread *thread,
1994 struct callchain_cursor *cursor,
1995 struct perf_evsel *evsel,
1996 struct perf_sample *sample,
1997 struct symbol **parent,
1998 struct addr_location *root_al,
1999 int max_stack)
2000{
2001 struct branch_stack *branch = sample->branch_stack;
2002 struct ip_callchain *chain = sample->callchain;
2003 int chain_nr = 0;
2004 u8 cpumode = PERF_RECORD_MISC_USER;
2005 int i, j, err, nr_entries;
2006 int skip_idx = -1;
2007 int first_call = 0;
2008
2009 if (chain)
2010 chain_nr = chain->nr;
2011
2012 if (perf_evsel__has_branch_callstack(evsel)) {
2013 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2014 root_al, max_stack);
2015 if (err)
2016 return (err < 0) ? err : 0;
2017 }
2018
2019 /*
2020 * Based on DWARF debug information, some architectures skip
2021 * a callchain entry saved by the kernel.
2022 */
2023 skip_idx = arch_skip_callchain_idx(thread, chain);
2024
2025 /*
2026 * Add branches to call stack for easier browsing. This gives
2027 * more context for a sample than just the callers.
2028 *
2029 * This uses individual histograms of paths compared to the
2030 * aggregated histograms the normal LBR mode uses.
2031 *
2032 * Limitations for now:
2033 * - No extra filters
2034 * - No annotations (should annotate somehow)
2035 */
2036
2037 if (branch && callchain_param.branch_callstack) {
2038 int nr = min(max_stack, (int)branch->nr);
2039 struct branch_entry be[nr];
2040 struct iterations iter[nr];
2041
2042 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2043 pr_warning("corrupted branch chain. skipping...\n");
2044 goto check_calls;
2045 }
2046
2047 for (i = 0; i < nr; i++) {
2048 if (callchain_param.order == ORDER_CALLEE) {
2049 be[i] = branch->entries[i];
2050
2051 if (chain == NULL)
2052 continue;
2053
2054 /*
2055 * Check for overlap into the callchain.
2056 * The return address is one off compared to
2057 * the branch entry. To adjust for this
2058 * assume the calling instruction is not longer
2059 * than 8 bytes.
2060 */
2061 if (i == skip_idx ||
2062 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2063 first_call++;
2064 else if (be[i].from < chain->ips[first_call] &&
2065 be[i].from >= chain->ips[first_call] - 8)
2066 first_call++;
2067 } else
2068 be[i] = branch->entries[branch->nr - i - 1];
2069 }
2070
2071 memset(iter, 0, sizeof(struct iterations) * nr);
2072 nr = remove_loops(be, nr, iter);
2073
2074 for (i = 0; i < nr; i++) {
2075 err = add_callchain_ip(thread, cursor, parent,
2076 root_al,
2077 NULL, be[i].to,
2078 true, &be[i].flags,
2079 NULL, be[i].from);
2080
2081 if (!err)
2082 err = add_callchain_ip(thread, cursor, parent, root_al,
2083 NULL, be[i].from,
2084 true, &be[i].flags,
2085 &iter[i], 0);
2086 if (err == -EINVAL)
2087 break;
2088 if (err)
2089 return err;
2090 }
2091
2092 if (chain_nr == 0)
2093 return 0;
2094
2095 chain_nr -= nr;
2096 }
2097
2098check_calls:
2099 for (i = first_call, nr_entries = 0;
2100 i < chain_nr && nr_entries < max_stack; i++) {
2101 u64 ip;
2102
2103 if (callchain_param.order == ORDER_CALLEE)
2104 j = i;
2105 else
2106 j = chain->nr - i - 1;
2107
2108#ifdef HAVE_SKIP_CALLCHAIN_IDX
2109 if (j == skip_idx)
2110 continue;
2111#endif
2112 ip = chain->ips[j];
2113
2114 if (ip < PERF_CONTEXT_MAX)
2115 ++nr_entries;
2116
2117 err = add_callchain_ip(thread, cursor, parent,
2118 root_al, &cpumode, ip,
2119 false, NULL, NULL, 0);
2120
2121 if (err)
2122 return (err < 0) ? err : 0;
2123 }
2124
2125 return 0;
2126}
2127
2128static int append_inlines(struct callchain_cursor *cursor,
2129 struct map *map, struct symbol *sym, u64 ip)
2130{
2131 struct inline_node *inline_node;
2132 struct inline_list *ilist;
2133 u64 addr;
2134 int ret = 1;
2135
2136 if (!symbol_conf.inline_name || !map || !sym)
2137 return ret;
2138
2139 addr = map__rip_2objdump(map, ip);
2140
2141 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2142 if (!inline_node) {
2143 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2144 if (!inline_node)
2145 return ret;
2146 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2147 }
2148
2149 list_for_each_entry(ilist, &inline_node->val, list) {
2150 ret = callchain_cursor_append(cursor, ip, map,
2151 ilist->symbol, false,
2152 NULL, 0, 0, 0, ilist->srcline);
2153
2154 if (ret != 0)
2155 return ret;
2156 }
2157
2158 return ret;
2159}
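
/*
 * Example (illustrative): if the DWARF data shows the sampled ip sits in
 * foo(), which was inlined into bar(), the inline_node list yields one
 * cursor entry per inlined frame, all sharing the same ip but each with
 * its own symbol and srcline, so browsers can expand the inline stack.
 */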
2160
2161static int unwind_entry(struct unwind_entry *entry, void *arg)
2162{
2163 struct callchain_cursor *cursor = arg;
2164 const char *srcline = NULL;
2165
2166 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2167 return 0;
2168
2169 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2170 return 0;
2171
2172 srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
2173 return callchain_cursor_append(cursor, entry->ip,
2174 entry->map, entry->sym,
2175 false, NULL, 0, 0, 0, srcline);
2176}
2177
2178static int thread__resolve_callchain_unwind(struct thread *thread,
2179 struct callchain_cursor *cursor,
2180 struct perf_evsel *evsel,
2181 struct perf_sample *sample,
2182 int max_stack)
2183{
2184 /* Can we do dwarf post unwind? */
2185 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2186 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
2187 return 0;
2188
2189 /* Bail out if nothing was captured. */
2190 if ((!sample->user_regs.regs) ||
2191 (!sample->user_stack.size))
2192 return 0;
2193
2194 return unwind__get_entries(unwind_entry, cursor,
2195 thread, sample, max_stack);
2196}
2197
2198int thread__resolve_callchain(struct thread *thread,
2199 struct callchain_cursor *cursor,
2200 struct perf_evsel *evsel,
2201 struct perf_sample *sample,
2202 struct symbol **parent,
2203 struct addr_location *root_al,
2204 int max_stack)
2205{
2206 int ret = 0;
2207
2208 callchain_cursor_reset(cursor);
2209
2210 if (callchain_param.order == ORDER_CALLEE) {
2211 ret = thread__resolve_callchain_sample(thread, cursor,
2212 evsel, sample,
2213 parent, root_al,
2214 max_stack);
2215 if (ret)
2216 return ret;
2217 ret = thread__resolve_callchain_unwind(thread, cursor,
2218 evsel, sample,
2219 max_stack);
2220 } else {
2221 ret = thread__resolve_callchain_unwind(thread, cursor,
2222 evsel, sample,
2223 max_stack);
2224 if (ret)
2225 return ret;
2226 ret = thread__resolve_callchain_sample(thread, cursor,
2227 evsel, sample,
2228 parent, root_al,
2229 max_stack);
2230 }
2231
2232 return ret;
2233}
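
/*
 * Typical usage (sketch; assumes the global callchain_cursor and an
 * already parsed sample, as in the report code):
 *
 *	struct symbol *parent = NULL;
 *	struct addr_location al;
 *
 *	if (thread__resolve_callchain(thread, &callchain_cursor, evsel,
 *				      sample, &parent, &al,
 *				      sysctl_perf_event_max_stack) == 0)
 *		callchain_cursor_commit(&callchain_cursor);
 */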
2234
2235int machine__for_each_thread(struct machine *machine,
2236 int (*fn)(struct thread *thread, void *p),
2237 void *priv)
2238{
2239 struct threads *threads;
2240 struct rb_node *nd;
2241 struct thread *thread;
2242 int rc = 0;
2243 int i;
2244
2245 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2246 threads = &machine->threads[i];
2247 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
2248 thread = rb_entry(nd, struct thread, rb_node);
2249 rc = fn(thread, priv);
2250 if (rc != 0)
2251 return rc;
2252 }
2253
2254 list_for_each_entry(thread, &threads->dead, node) {
2255 rc = fn(thread, priv);
2256 if (rc != 0)
2257 return rc;
2258 }
2259 }
2260 return rc;
2261}
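
/*
 * Example callback (sketch): count the live and dead threads of a machine.
 * A non-zero return from the callback aborts the walk and is propagated.
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */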
2262
2263int machines__for_each_thread(struct machines *machines,
2264 int (*fn)(struct thread *thread, void *p),
2265 void *priv)
2266{
2267 struct rb_node *nd;
2268 int rc = 0;
2269
2270 rc = machine__for_each_thread(&machines->host, fn, priv);
2271 if (rc != 0)
2272 return rc;
2273
2274 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
2275 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2276
2277 rc = machine__for_each_thread(machine, fn, priv);
2278 if (rc != 0)
2279 return rc;
2280 }
2281 return rc;
2282}
2283
2284int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2285 struct target *target, struct thread_map *threads,
2286 perf_event__handler_t process, bool data_mmap,
2287 unsigned int proc_map_timeout,
2288 unsigned int nr_threads_synthesize)
2289{
2290 if (target__has_task(target))
2291 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
2292 else if (target__has_cpu(target))
2293 return perf_event__synthesize_threads(tool, process,
2294 machine, data_mmap,
2295 proc_map_timeout,
2296 nr_threads_synthesize);
2297 /* command specified */
2298 return 0;
2299}
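
/*
 * Routing sketch: "perf top -p <pid>" style targets take the
 * target__has_task() branch, system-wide or CPU-list targets take the
 * target__has_cpu() one, and when a command is being launched nothing is
 * synthesized here because its threads will arrive as FORK/COMM events.
 */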
2300
2301pid_t machine__get_current_tid(struct machine *machine, int cpu)
2302{
2303 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
2304 return -1;
2305
2306 return machine->current_tid[cpu];
2307}
2308
2309int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2310 pid_t tid)
2311{
2312 struct thread *thread;
2313
2314 if (cpu < 0)
2315 return -EINVAL;
2316
2317 if (!machine->current_tid) {
2318 int i;
2319
2320 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
2321 if (!machine->current_tid)
2322 return -ENOMEM;
2323 for (i = 0; i < MAX_NR_CPUS; i++)
2324 machine->current_tid[i] = -1;
2325 }
2326
2327 if (cpu >= MAX_NR_CPUS) {
2328 pr_err("Requested CPU %d too large. ", cpu);
2329 pr_err("Consider raising MAX_NR_CPUS\n");
2330 return -EINVAL;
2331 }
2332
2333 machine->current_tid[cpu] = tid;
2334
2335 thread = machine__findnew_thread(machine, pid, tid);
2336 if (!thread)
2337 return -ENOMEM;
2338
2339 thread->cpu = cpu;
2340 thread__put(thread);
2341
2342 return 0;
2343}
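
/*
 * Example (illustrative): a context-switch handler can track what runs on
 * CPU 2 and query it later:
 *
 *	machine__set_current_tid(machine, 2, pid, tid);
 *	...
 *	pid_t cur = machine__get_current_tid(machine, 2); // tid, or -1
 *
 * The per-CPU table is allocated lazily above and primed with -1.
 */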
2344
2345int machine__get_kernel_start(struct machine *machine)
2346{
2347 struct map *map = machine__kernel_map(machine);
2348 int err = 0;
2349
2350 /*
2351 * The only addresses above 2^63 are kernel addresses of a 64-bit
2352 * kernel. Note that addresses are unsigned so that on a 32-bit system
2353 * all addresses including kernel addresses are less than 2^32. In
2354 * that case (32-bit system), if the kernel mapping is unknown, all
2355 * addresses will be assumed to be in user space - see
2356 * machine__kernel_ip().
2357 */
2358 machine->kernel_start = 1ULL << 63;
2359 if (map) {
2360 err = map__load(map);
2361 if (!err)
2362 machine->kernel_start = map->start;
2363 }
2364 return err;
2365}
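
/*
 * Consequence (sketch): machine__kernel_ip() compares addresses against
 * the value set here, so while the kernel map is unknown only the top
 * half of the 64-bit address space is treated as kernel:
 *
 *	machine__kernel_ip(machine, 0x00007f0000001000ULL); // false (user)
 *	machine__kernel_ip(machine, 0xffffffff81000000ULL); // true
 */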
2366
2367struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2368{
2369 return dsos__findnew(&machine->dsos, filename);
2370}
2371
2372char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2373{
2374 struct machine *machine = vmachine;
2375 struct map *map;
2376 struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);
2377
2378 if (sym == NULL)
2379 return NULL;
2380
2381 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2382 *addrp = map->unmap_ip(map, sym->start);
2383 return sym->name;
2384}
1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <inttypes.h>
5#include <regex.h>
6#include <stdlib.h>
7#include "callchain.h"
8#include "debug.h"
9#include "dso.h"
10#include "env.h"
11#include "event.h"
12#include "evsel.h"
13#include "hist.h"
14#include "machine.h"
15#include "map.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "mem-events.h"
19#include "mem-info.h"
20#include "path.h"
21#include "srcline.h"
22#include "symbol.h"
23#include "sort.h"
24#include "strlist.h"
25#include "target.h"
26#include "thread.h"
27#include "util.h"
28#include "vdso.h"
29#include <stdbool.h>
30#include <sys/types.h>
31#include <sys/stat.h>
32#include <unistd.h>
33#include "unwind.h"
34#include "linux/hash.h"
35#include "asm/bug.h"
36#include "bpf-event.h"
37#include <internal/lib.h> // page_size
38#include "cgroup.h"
39#include "arm64-frame-pointer-unwind-support.h"
40
41#include <linux/ctype.h>
42#include <symbol/kallsyms.h>
43#include <linux/mman.h>
44#include <linux/string.h>
45#include <linux/zalloc.h>
46
47static struct dso *machine__kernel_dso(struct machine *machine)
48{
49 return map__dso(machine->vmlinux_map);
50}
51
52static int machine__set_mmap_name(struct machine *machine)
53{
54 if (machine__is_host(machine))
55 machine->mmap_name = strdup("[kernel.kallsyms]");
56 else if (machine__is_default_guest(machine))
57 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
58 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
59 machine->pid) < 0)
60 machine->mmap_name = NULL;
61
62 return machine->mmap_name ? 0 : -ENOMEM;
63}
64
65static void thread__set_guest_comm(struct thread *thread, pid_t pid)
66{
67 char comm[64];
68
69 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
70 thread__set_comm(thread, comm, 0);
71}
72
73int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
74{
75 int err = -ENOMEM;
76
77 memset(machine, 0, sizeof(*machine));
78 machine->kmaps = maps__new(machine);
79 if (machine->kmaps == NULL)
80 return -ENOMEM;
81
82 RB_CLEAR_NODE(&machine->rb_node);
83 dsos__init(&machine->dsos);
84
85 threads__init(&machine->threads);
86
87 machine->vdso_info = NULL;
88 machine->env = NULL;
89
90 machine->pid = pid;
91
92 machine->id_hdr_size = 0;
93 machine->kptr_restrict_warned = false;
94 machine->comm_exec = false;
95 machine->kernel_start = 0;
96 machine->vmlinux_map = NULL;
97
98 machine->root_dir = strdup(root_dir);
99 if (machine->root_dir == NULL)
100 goto out;
101
102 if (machine__set_mmap_name(machine))
103 goto out;
104
105 if (pid != HOST_KERNEL_ID) {
106 struct thread *thread = machine__findnew_thread(machine, -1,
107 pid);
108
109 if (thread == NULL)
110 goto out;
111
112 thread__set_guest_comm(thread, pid);
113 thread__put(thread);
114 }
115
116 machine->current_tid = NULL;
117 err = 0;
118
119out:
120 if (err) {
121 zfree(&machine->kmaps);
122 zfree(&machine->root_dir);
123 zfree(&machine->mmap_name);
124 }
125 return err;
126}
127
128struct machine *machine__new_host(void)
129{
130 struct machine *machine = malloc(sizeof(*machine));
131
132 if (machine != NULL) {
133 machine__init(machine, "", HOST_KERNEL_ID);
134
135 if (machine__create_kernel_maps(machine) < 0)
136 goto out_delete;
137
138 machine->env = &perf_env;
139 }
140
141 return machine;
142out_delete:
143 free(machine);
144 return NULL;
145}
146
147struct machine *machine__new_kallsyms(void)
148{
149 struct machine *machine = machine__new_host();
150 /*
151 * FIXME:
152 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
153 * ask for not using the kcore parsing code, once this one is fixed
154 * to create a map per module.
155 */
156 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
157 machine__delete(machine);
158 machine = NULL;
159 }
160
161 return machine;
162}
163
164void machine__delete_threads(struct machine *machine)
165{
166 threads__remove_all_threads(&machine->threads);
167}
168
169void machine__exit(struct machine *machine)
170{
171 if (machine == NULL)
172 return;
173
174 machine__destroy_kernel_maps(machine);
175 maps__zput(machine->kmaps);
176 dsos__exit(&machine->dsos);
177 machine__exit_vdso(machine);
178 zfree(&machine->root_dir);
179 zfree(&machine->mmap_name);
180 zfree(&machine->current_tid);
181 zfree(&machine->kallsyms_filename);
182
183 threads__exit(&machine->threads);
184}
185
186void machine__delete(struct machine *machine)
187{
188 if (machine) {
189 machine__exit(machine);
190 free(machine);
191 }
192}
193
194void machines__init(struct machines *machines)
195{
196 machine__init(&machines->host, "", HOST_KERNEL_ID);
197 machines->guests = RB_ROOT_CACHED;
198}
199
200void machines__exit(struct machines *machines)
201{
202 machine__exit(&machines->host);
203 /* XXX exit guest */
204}
205
206struct machine *machines__add(struct machines *machines, pid_t pid,
207 const char *root_dir)
208{
209 struct rb_node **p = &machines->guests.rb_root.rb_node;
210 struct rb_node *parent = NULL;
211 struct machine *pos, *machine = malloc(sizeof(*machine));
212 bool leftmost = true;
213
214 if (machine == NULL)
215 return NULL;
216
217 if (machine__init(machine, root_dir, pid) != 0) {
218 free(machine);
219 return NULL;
220 }
221
222 while (*p != NULL) {
223 parent = *p;
224 pos = rb_entry(parent, struct machine, rb_node);
225 if (pid < pos->pid)
226 p = &(*p)->rb_left;
227 else {
228 p = &(*p)->rb_right;
229 leftmost = false;
230 }
231 }
232
233 rb_link_node(&machine->rb_node, parent, p);
234 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
235
236 machine->machines = machines;
237
238 return machine;
239}
240
241void machines__set_comm_exec(struct machines *machines, bool comm_exec)
242{
243 struct rb_node *nd;
244
245 machines->host.comm_exec = comm_exec;
246
247 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
248 struct machine *machine = rb_entry(nd, struct machine, rb_node);
249
250 machine->comm_exec = comm_exec;
251 }
252}
253
254struct machine *machines__find(struct machines *machines, pid_t pid)
255{
256 struct rb_node **p = &machines->guests.rb_root.rb_node;
257 struct rb_node *parent = NULL;
258 struct machine *machine;
259 struct machine *default_machine = NULL;
260
261 if (pid == HOST_KERNEL_ID)
262 return &machines->host;
263
264 while (*p != NULL) {
265 parent = *p;
266 machine = rb_entry(parent, struct machine, rb_node);
267 if (pid < machine->pid)
268 p = &(*p)->rb_left;
269 else if (pid > machine->pid)
270 p = &(*p)->rb_right;
271 else
272 return machine;
273 if (!machine->pid)
274 default_machine = machine;
275 }
276
277 return default_machine;
278}
279
280struct machine *machines__findnew(struct machines *machines, pid_t pid)
281{
282 char path[PATH_MAX];
283 const char *root_dir = "";
284 struct machine *machine = machines__find(machines, pid);
285
286 if (machine && (machine->pid == pid))
287 goto out;
288
289 if ((pid != HOST_KERNEL_ID) &&
290 (pid != DEFAULT_GUEST_KERNEL_ID) &&
291 (symbol_conf.guestmount)) {
292 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
293 if (access(path, R_OK)) {
294 static struct strlist *seen;
295
296 if (!seen)
297 seen = strlist__new(NULL, NULL);
298
299 if (!strlist__has_entry(seen, path)) {
300 pr_err("Can't access file %s\n", path);
301 strlist__add(seen, path);
302 }
303 machine = NULL;
304 goto out;
305 }
306 root_dir = path;
307 }
308
309 machine = machines__add(machines, pid, root_dir);
310out:
311 return machine;
312}
313
314struct machine *machines__find_guest(struct machines *machines, pid_t pid)
315{
316 struct machine *machine = machines__find(machines, pid);
317
318 if (!machine)
319 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
320 return machine;
321}
322
323/*
324 * A common case for KVM test programs is that the test program acts as the
325 * hypervisor, creating, running and destroying the virtual machine, and
326 * providing the guest object code from its own object code. In this case,
327 * the VM is not running an OS, but only the functions loaded into it by the
328 * hypervisor test program, and conveniently, loaded at the same virtual
329 * addresses.
330 *
331 * Normally to resolve addresses, MMAP events are needed to map addresses
332 * back to the object code and debug symbols for that object code.
333 *
334 * Currently, there is no way to get such mapping information from guests
335 * but, in the scenario described above, the guest has the same mappings
336 * as the hypervisor, so support for that scenario can be achieved.
337 *
338 * To support that, copy the host thread's maps to the guest thread's maps.
339 * Note, we do not discover the guest until we encounter a guest event,
340 * which works well because it is not until then that we know that the host
341 * thread's maps have been set up.
342 *
343 * This function returns the guest thread. Apart from keeping the data
344 * structures sane, using a thread belonging to the guest machine, instead
345 * of the host thread, allows it to have its own comm (see
346 * thread__set_guest_comm()); a usage sketch follows below.
347 */
348static struct thread *findnew_guest_code(struct machine *machine,
349 struct machine *host_machine,
350 pid_t pid)
351{
352 struct thread *host_thread;
353 struct thread *thread;
354 int err;
355
356 if (!machine)
357 return NULL;
358
359 thread = machine__findnew_thread(machine, -1, pid);
360 if (!thread)
361 return NULL;
362
363 /* Assume maps are set up if there are any */
364 if (!maps__empty(thread__maps(thread)))
365 return thread;
366
367 host_thread = machine__find_thread(host_machine, -1, pid);
368 if (!host_thread)
369 goto out_err;
370
371 thread__set_guest_comm(thread, pid);
372
373 /*
374 * Guest code can be found in hypervisor process at the same address
375 * so copy host maps.
376 */
377 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
378 thread__put(host_thread);
379 if (err)
380 goto out_err;
381
382 return thread;
383
384out_err:
385 thread__zput(thread);
386 return NULL;
387}
388
389struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
390{
391 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
392 struct machine *machine = machines__findnew(machines, pid);
393
394 return findnew_guest_code(machine, host_machine, pid);
395}
396
397struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
398{
399 struct machines *machines = machine->machines;
400 struct machine *host_machine;
401
402 if (!machines)
403 return NULL;
404
405 host_machine = machines__find(machines, HOST_KERNEL_ID);
406
407 return findnew_guest_code(machine, host_machine, pid);
408}
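
/*
 * Example (illustrative): a tool handling a guest sample in the KVM test
 * scenario described above can do
 *
 *	struct thread *t = machines__findnew_guest_code(machines, sample->pid);
 *
 * and resolve guest ips through the copied host maps; the returned
 * reference is expected to be dropped with thread__put() when done.
 */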
409
410void machines__process_guests(struct machines *machines,
411 machine__process_t process, void *data)
412{
413 struct rb_node *nd;
414
415 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
416 struct machine *pos = rb_entry(nd, struct machine, rb_node);
417 process(pos, data);
418 }
419}
420
421void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
422{
423 struct rb_node *node;
424 struct machine *machine;
425
426 machines->host.id_hdr_size = id_hdr_size;
427
428 for (node = rb_first_cached(&machines->guests); node;
429 node = rb_next(node)) {
430 machine = rb_entry(node, struct machine, rb_node);
431 machine->id_hdr_size = id_hdr_size;
432 }
433
434 return;
435}
436
437static void machine__update_thread_pid(struct machine *machine,
438 struct thread *th, pid_t pid)
439{
440 struct thread *leader;
441
442 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
443 return;
444
445 thread__set_pid(th, pid);
446
447 if (thread__pid(th) == thread__tid(th))
448 return;
449
450 leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
451 if (!leader)
452 goto out_err;
453
454 if (!thread__maps(leader))
455 thread__set_maps(leader, maps__new(machine));
456
457 if (!thread__maps(leader))
458 goto out_err;
459
460 if (thread__maps(th) == thread__maps(leader))
461 goto out_put;
462
463 if (thread__maps(th)) {
464 /*
465 * Maps are created from MMAP events which provide the pid and
466 * tid. Consequently there never should be any maps on a thread
467 * with an unknown pid. Just print an error if there are.
468 */
469 if (!maps__empty(thread__maps(th)))
470 pr_err("Discarding thread maps for %d:%d\n",
471 thread__pid(th), thread__tid(th));
472 maps__put(thread__maps(th));
473 }
474
475 thread__set_maps(th, maps__get(thread__maps(leader)));
476out_put:
477 thread__put(leader);
478 return;
479out_err:
480 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
481 goto out_put;
482}
483
484/*
485 * Caller must eventually drop thread->refcnt returned with a successful
486 * lookup/new thread inserted.
487 */
488static struct thread *__machine__findnew_thread(struct machine *machine,
489 pid_t pid,
490 pid_t tid,
491 bool create)
492{
493 struct thread *th = threads__find(&machine->threads, tid);
494 bool created;
495
496 if (th) {
497 machine__update_thread_pid(machine, th, pid);
498 return th;
499 }
500 if (!create)
501 return NULL;
502
503 th = threads__findnew(&machine->threads, pid, tid, &created);
504 if (created) {
505 /*
506 * We have to initialize maps separately after rb tree is
507 * updated.
508 *
509 * The reason is that we call machine__findnew_thread within
510 * thread__init_maps to find the thread leader and that would
511 * screw up the rb tree.
512 */
513 if (thread__init_maps(th, machine)) {
514 pr_err("Thread init failed thread %d\n", pid);
515 threads__remove(&machine->threads, th);
516 thread__put(th);
517 return NULL;
518 }
519 } else
520 machine__update_thread_pid(machine, th, pid);
521
522 return th;
523}
524
525struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
526{
527 return __machine__findnew_thread(machine, pid, tid, /*create=*/true);
528}
529
530struct thread *machine__find_thread(struct machine *machine, pid_t pid,
531 pid_t tid)
532{
533 return __machine__findnew_thread(machine, pid, tid, /*create=*/false);
534}
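
/*
 * Lookup sketch honouring the refcount contract stated above:
 *
 *	struct thread *t = machine__find_thread(machine, pid, tid);
 *
 *	if (t != NULL) {
 *		// ... use t ...
 *		thread__put(t);
 *	}
 */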
535
536/*
537 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
538 * So here a single thread is created for that, but actually there is a separate
539 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
540 * is only 1. That causes problems for some tools, requiring workarounds. For
541 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
542 */
543struct thread *machine__idle_thread(struct machine *machine)
544{
545 struct thread *thread = machine__findnew_thread(machine, 0, 0);
546
547 if (!thread || thread__set_comm(thread, "swapper", 0) ||
548 thread__set_namespaces(thread, 0, NULL))
549 pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
550
551 return thread;
552}
553
554struct comm *machine__thread_exec_comm(struct machine *machine,
555 struct thread *thread)
556{
557 if (machine->comm_exec)
558 return thread__exec_comm(thread);
559 else
560 return thread__comm(thread);
561}
562
563int machine__process_comm_event(struct machine *machine, union perf_event *event,
564 struct perf_sample *sample)
565{
566 struct thread *thread = machine__findnew_thread(machine,
567 event->comm.pid,
568 event->comm.tid);
569 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
570 int err = 0;
571
572 if (exec)
573 machine->comm_exec = true;
574
575 if (dump_trace)
576 perf_event__fprintf_comm(event, stdout);
577
578 if (thread == NULL ||
579 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
580 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
581 err = -1;
582 }
583
584 thread__put(thread);
585
586 return err;
587}
588
589int machine__process_namespaces_event(struct machine *machine,
590 union perf_event *event,
591 struct perf_sample *sample)
592{
593 struct thread *thread = machine__findnew_thread(machine,
594 event->namespaces.pid,
595 event->namespaces.tid);
596 int err = 0;
597
598 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
599 "\nWARNING: kernel seems to support more namespaces than perf"
600 " tool.\nTry updating the perf tool..\n\n");
601
602 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
603 "\nWARNING: perf tool seems to support more namespaces than"
604 " the kernel.\nTry updating the kernel..\n\n");
605
606 if (dump_trace)
607 perf_event__fprintf_namespaces(event, stdout);
608
609 if (thread == NULL ||
610 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
611 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
612 err = -1;
613 }
614
615 thread__put(thread);
616
617 return err;
618}
619
620int machine__process_cgroup_event(struct machine *machine,
621 union perf_event *event,
622 struct perf_sample *sample __maybe_unused)
623{
624 struct cgroup *cgrp;
625
626 if (dump_trace)
627 perf_event__fprintf_cgroup(event, stdout);
628
629 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
630 if (cgrp == NULL)
631 return -ENOMEM;
632
633 return 0;
634}
635
636int machine__process_lost_event(struct machine *machine __maybe_unused,
637 union perf_event *event, struct perf_sample *sample __maybe_unused)
638{
639 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
640 event->lost.id, event->lost.lost);
641 return 0;
642}
643
644int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
645 union perf_event *event, struct perf_sample *sample)
646{
647 dump_printf(": id:%" PRIu64 ": lost samples: %" PRI_lu64 "%s\n",
648 sample->id, event->lost_samples.lost,
649 event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
650 return 0;
651}
652
653int machine__process_aux_event(struct machine *machine __maybe_unused,
654 union perf_event *event)
655{
656 if (dump_trace)
657 perf_event__fprintf_aux(event, stdout);
658 return 0;
659}
660
661int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
662 union perf_event *event)
663{
664 if (dump_trace)
665 perf_event__fprintf_itrace_start(event, stdout);
666 return 0;
667}
668
669int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
670 union perf_event *event)
671{
672 if (dump_trace)
673 perf_event__fprintf_aux_output_hw_id(event, stdout);
674 return 0;
675}
676
677int machine__process_switch_event(struct machine *machine __maybe_unused,
678 union perf_event *event)
679{
680 if (dump_trace)
681 perf_event__fprintf_switch(event, stdout);
682 return 0;
683}
684
685static int machine__process_ksymbol_register(struct machine *machine,
686 union perf_event *event,
687 struct perf_sample *sample __maybe_unused)
688{
689 struct symbol *sym;
690 struct dso *dso = NULL;
691 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
692 int err = 0;
693
694 if (!map) {
695 dso = dso__new(event->ksymbol.name);
696
697 if (!dso) {
698 err = -ENOMEM;
699 goto out;
700 }
701 dso__set_kernel(dso, DSO_SPACE__KERNEL);
702 map = map__new2(0, dso);
703 if (!map) {
704 err = -ENOMEM;
705 goto out;
706 }
707 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
708 dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
709 dso__data(dso)->file_size = event->ksymbol.len;
710 dso__set_loaded(dso);
711 }
712
713 map__set_start(map, event->ksymbol.addr);
714 map__set_end(map, map__start(map) + event->ksymbol.len);
715 err = maps__insert(machine__kernel_maps(machine), map);
716 if (err) {
717 err = -ENOMEM;
718 goto out;
719 }
720
721 dso__set_loaded(dso);
722
723 if (is_bpf_image(event->ksymbol.name)) {
724 dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
725 dso__set_long_name(dso, "", false);
726 }
727 } else {
728 dso = dso__get(map__dso(map));
729 }
730
731 sym = symbol__new(map__map_ip(map, map__start(map)),
732 event->ksymbol.len,
733 0, 0, event->ksymbol.name);
734 if (!sym) {
735 err = -ENOMEM;
736 goto out;
737 }
738 dso__insert_symbol(dso, sym);
739out:
740 map__put(map);
741 dso__put(dso);
742 return err;
743}
744
745static int machine__process_ksymbol_unregister(struct machine *machine,
746 union perf_event *event,
747 struct perf_sample *sample __maybe_unused)
748{
749 struct symbol *sym;
750 struct map *map;
751
752 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
753 if (!map)
754 return 0;
755
756 if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
757 maps__remove(machine__kernel_maps(machine), map);
758 else {
759 struct dso *dso = map__dso(map);
760
761 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
762 if (sym)
763 dso__delete_symbol(dso, sym);
764 }
765 map__put(map);
766 return 0;
767}
768
769int machine__process_ksymbol(struct machine *machine __maybe_unused,
770 union perf_event *event,
771 struct perf_sample *sample)
772{
773 if (dump_trace)
774 perf_event__fprintf_ksymbol(event, stdout);
775
776 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
777 return machine__process_ksymbol_unregister(machine, event,
778 sample);
779 return machine__process_ksymbol_register(machine, event, sample);
780}
781
782int machine__process_text_poke(struct machine *machine, union perf_event *event,
783 struct perf_sample *sample __maybe_unused)
784{
785 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
786 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
787 struct dso *dso = map ? map__dso(map) : NULL;
788
789 if (dump_trace)
790 perf_event__fprintf_text_poke(event, machine, stdout);
791
792 if (!event->text_poke.new_len)
793 goto out;
794
795 if (cpumode != PERF_RECORD_MISC_KERNEL) {
796 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
797 goto out;
798 }
799
800 if (dso) {
801 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
802 int ret;
803
804 /*
805 * Kernel maps might be changed when loading symbols so loading
806 * must be done prior to using kernel maps.
807 */
808 map__load(map);
809 ret = dso__data_write_cache_addr(dso, map, machine,
810 event->text_poke.addr,
811 new_bytes,
812 event->text_poke.new_len);
813 if (ret != event->text_poke.new_len)
814 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
815 event->text_poke.addr);
816 } else {
817 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
818 event->text_poke.addr);
819 }
820out:
821 map__put(map);
822 return 0;
823}
824
825static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
826 const char *filename)
827{
828 struct map *map = NULL;
829 struct kmod_path m;
830 struct dso *dso;
831 int err;
832
833 if (kmod_path__parse_name(&m, filename))
834 return NULL;
835
836 dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
837 if (dso == NULL)
838 goto out;
839
840 map = map__new2(start, dso);
841 if (map == NULL)
842 goto out;
843
844 err = maps__insert(machine__kernel_maps(machine), map);
845 /* If maps__insert failed, return NULL. */
846 if (err) {
847 map__put(map);
848 map = NULL;
849 }
850out:
851 /* put the dso here, corresponding to machine__findnew_module_dso */
852 dso__put(dso);
853 zfree(&m.name);
854 return map;
855}
856
857size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
858{
859 struct rb_node *nd;
860 size_t ret = dsos__fprintf(&machines->host.dsos, fp);
861
862 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
863 struct machine *pos = rb_entry(nd, struct machine, rb_node);
864 ret += dsos__fprintf(&pos->dsos, fp);
865 }
866
867 return ret;
868}
869
870size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
871 bool (skip)(struct dso *dso, int parm), int parm)
872{
873 return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
874}
875
876size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
877 bool (skip)(struct dso *dso, int parm), int parm)
878{
879 struct rb_node *nd;
880 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
881
882 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
883 struct machine *pos = rb_entry(nd, struct machine, rb_node);
884 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
885 }
886 return ret;
887}
888
889size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
890{
891 int i;
892 size_t printed = 0;
893 struct dso *kdso = machine__kernel_dso(machine);
894
895 if (dso__has_build_id(kdso)) {
896 char filename[PATH_MAX];
897
898 if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
899 printed += fprintf(fp, "[0] %s\n", filename);
900 }
901
902 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
903 printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
904 vmlinux_path[i]);
905 }
906 return printed;
907}
908
909struct machine_fprintf_cb_args {
910 FILE *fp;
911 size_t printed;
912};
913
914static int machine_fprintf_cb(struct thread *thread, void *data)
915{
916 struct machine_fprintf_cb_args *args = data;
917
918 /* TODO: handle fprintf errors. */
919 args->printed += thread__fprintf(thread, args->fp);
920 return 0;
921}
922
923size_t machine__fprintf(struct machine *machine, FILE *fp)
924{
925 struct machine_fprintf_cb_args args = {
926 .fp = fp,
927 .printed = 0,
928 };
929 size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads));
930
931 machine__for_each_thread(machine, machine_fprintf_cb, &args);
932 return ret + args.printed;
933}
934
935static struct dso *machine__get_kernel(struct machine *machine)
936{
937 const char *vmlinux_name = machine->mmap_name;
938 struct dso *kernel;
939
940 if (machine__is_host(machine)) {
941 if (symbol_conf.vmlinux_name)
942 vmlinux_name = symbol_conf.vmlinux_name;
943
944 kernel = machine__findnew_kernel(machine, vmlinux_name,
945 "[kernel]", DSO_SPACE__KERNEL);
946 } else {
947 if (symbol_conf.default_guest_vmlinux_name)
948 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
949
950 kernel = machine__findnew_kernel(machine, vmlinux_name,
951 "[guest.kernel]",
952 DSO_SPACE__KERNEL_GUEST);
953 }
954
955 if (kernel != NULL && (!dso__has_build_id(kernel)))
956 dso__read_running_kernel_build_id(kernel, machine);
957
958 return kernel;
959}
960
961void machine__get_kallsyms_filename(struct machine *machine, char *buf,
962 size_t bufsz)
963{
964 if (machine__is_default_guest(machine))
965 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
966 else
967 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
968}
969
970const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
971
972/* Figure out the start address of kernel map from /proc/kallsyms.
973 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
974 * symbol_name if it's not that important.
975 */
976static int machine__get_running_kernel_start(struct machine *machine,
977 const char **symbol_name,
978 u64 *start, u64 *end)
979{
980 char filename[PATH_MAX];
981 int i, err = -1;
982 const char *name;
983 u64 addr = 0;
984
985 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
986
987 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
988 return 0;
989
990 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
991 err = kallsyms__get_function_start(filename, name, &addr);
992 if (!err)
993 break;
994 }
995
996 if (err)
997 return -1;
998
999 if (symbol_name)
1000 *symbol_name = name;
1001
1002 *start = addr;
1003
1004 err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1005 if (err)
1006 err = kallsyms__get_symbol_start(filename, "_etext", &addr);
1007 if (!err)
1008 *end = addr;
1009
1010 return 0;
1011}
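
/*
 * Example (illustrative kallsyms contents):
 *
 *	ffffffff81000000 T _text
 *	...
 *	ffffffff82e00000 D _edata
 *
 * would yield *symbol_name = "_text", *start = 0xffffffff81000000 and
 * *end taken from _edata, falling back to _etext when _edata is missing.
 */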
1012
1013int machine__create_extra_kernel_map(struct machine *machine,
1014 struct dso *kernel,
1015 struct extra_kernel_map *xm)
1016{
1017 struct kmap *kmap;
1018 struct map *map;
1019 int err;
1020
1021 map = map__new2(xm->start, kernel);
1022 if (!map)
1023 return -ENOMEM;
1024
1025 map__set_end(map, xm->end);
1026 map__set_pgoff(map, xm->pgoff);
1027
1028 kmap = map__kmap(map);
1029
1030 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1031
1032 err = maps__insert(machine__kernel_maps(machine), map);
1033
1034 if (!err) {
1035 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1036 kmap->name, map__start(map), map__end(map));
1037 }
1038
1039 map__put(map);
1040
1041 return err;
1042}
1043
1044static u64 find_entry_trampoline(struct dso *dso)
1045{
1046 /* Duplicates are removed so lookup all aliases */
1047 const char *syms[] = {
1048 "_entry_trampoline",
1049 "__entry_trampoline_start",
1050 "entry_SYSCALL_64_trampoline",
1051 };
1052 struct symbol *sym = dso__first_symbol(dso);
1053 unsigned int i;
1054
1055 for (; sym; sym = dso__next_symbol(sym)) {
1056 if (sym->binding != STB_GLOBAL)
1057 continue;
1058 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1059 if (!strcmp(sym->name, syms[i]))
1060 return sym->start;
1061 }
1062 }
1063
1064 return 0;
1065}
1066
1067/*
1068 * These values can be used for kernels that do not have symbols for the entry
1069 * trampolines in kallsyms.
1070 */
1071#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1072#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1073#define X86_64_ENTRY_TRAMPOLINE 0x6000
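
/*
 * Worked example for the constants above: the trampoline of CPU 1 is
 * expected at
 *
 *	0xfffffe0000000000 + 1 * 0x2c000 + 0x6000 = 0xfffffe0000032000
 *
 * and that is the address used when the per-CPU maps are synthesized
 * below.
 */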
1074
1075struct machine__map_x86_64_entry_trampolines_args {
1076 struct maps *kmaps;
1077 bool found;
1078};
1079
1080static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1081{
1082 struct machine__map_x86_64_entry_trampolines_args *args = data;
1083 struct map *dest_map;
1084 struct kmap *kmap = __map__kmap(map);
1085
1086 if (!kmap || !is_entry_trampoline(kmap->name))
1087 return 0;
1088
1089 dest_map = maps__find(args->kmaps, map__pgoff(map));
1090 if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
1091 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1092
1093 map__put(dest_map);
1094 args->found = true;
1095 return 0;
1096}
1097
1098/* Map x86_64 PTI entry trampolines */
1099int machine__map_x86_64_entry_trampolines(struct machine *machine,
1100 struct dso *kernel)
1101{
1102 struct machine__map_x86_64_entry_trampolines_args args = {
1103 .kmaps = machine__kernel_maps(machine),
1104 .found = false,
1105 };
1106 int nr_cpus_avail, cpu;
1107 u64 pgoff;
1108
1109 /*
1110 * In the vmlinux case, pgoff is a virtual address which must now be
1111 * mapped to a vmlinux offset.
1112 */
1113 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1114
1115 if (args.found || machine->trampolines_mapped)
1116 return 0;
1117
1118 pgoff = find_entry_trampoline(kernel);
1119 if (!pgoff)
1120 return 0;
1121
1122 nr_cpus_avail = machine__nr_cpus_avail(machine);
1123
1124 /* Add a 1 page map for each CPU's entry trampoline */
1125 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1126 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1127 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1128 X86_64_ENTRY_TRAMPOLINE;
1129 struct extra_kernel_map xm = {
1130 .start = va,
1131 .end = va + page_size,
1132 .pgoff = pgoff,
1133 };
1134
1135 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1136
1137 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1138 return -1;
1139 }
1140
1141 machine->trampolines_mapped = nr_cpus_avail;
1142
1143 return 0;
1144}
1145
1146int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1147 struct dso *kernel __maybe_unused)
1148{
1149 return 0;
1150}
1151
1152static int
1153__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1154{
1155 /* In case of renewal of the kernel map, destroy the previous one */
1156 machine__destroy_kernel_maps(machine);
1157
1158 map__put(machine->vmlinux_map);
1159 machine->vmlinux_map = map__new2(0, kernel);
1160 if (machine->vmlinux_map == NULL)
1161 return -ENOMEM;
1162
1163 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1164 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1165}
1166
1167void machine__destroy_kernel_maps(struct machine *machine)
1168{
1169 struct kmap *kmap;
1170 struct map *map = machine__kernel_map(machine);
1171
1172 if (map == NULL)
1173 return;
1174
1175 kmap = map__kmap(map);
1176 maps__remove(machine__kernel_maps(machine), map);
1177 if (kmap && kmap->ref_reloc_sym) {
1178 zfree((char **)&kmap->ref_reloc_sym->name);
1179 zfree(&kmap->ref_reloc_sym);
1180 }
1181
1182 map__zput(machine->vmlinux_map);
1183}
1184
1185int machines__create_guest_kernel_maps(struct machines *machines)
1186{
1187 int ret = 0;
1188 struct dirent **namelist = NULL;
1189 int i, items = 0;
1190 char path[PATH_MAX];
1191 pid_t pid;
1192 char *endp;
1193
1194 if (symbol_conf.default_guest_vmlinux_name ||
1195 symbol_conf.default_guest_modules ||
1196 symbol_conf.default_guest_kallsyms) {
1197 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1198 }
1199
1200 if (symbol_conf.guestmount) {
1201 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1202 if (items <= 0)
1203 return -ENOENT;
1204 for (i = 0; i < items; i++) {
1205 if (!isdigit(namelist[i]->d_name[0])) {
1206 /* Filter out . and .. */
1207 continue;
1208 }
1209 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1210 if ((*endp != '\0') ||
1211 (endp == namelist[i]->d_name) ||
1212 (errno == ERANGE)) {
1213 pr_debug("invalid directory (%s). Skipping.\n",
1214 namelist[i]->d_name);
1215 continue;
1216 }
1217 sprintf(path, "%s/%s/proc/kallsyms",
1218 symbol_conf.guestmount,
1219 namelist[i]->d_name);
1220 ret = access(path, R_OK);
1221 if (ret) {
1222 pr_debug("Can't access file %s\n", path);
1223 goto failure;
1224 }
1225 machines__create_kernel_maps(machines, pid);
1226 }
1227failure:
1228 free(namelist);
1229 }
1230
1231 return ret;
1232}
1233
1234void machines__destroy_kernel_maps(struct machines *machines)
1235{
1236 struct rb_node *next = rb_first_cached(&machines->guests);
1237
1238 machine__destroy_kernel_maps(&machines->host);
1239
1240 while (next) {
1241 struct machine *pos = rb_entry(next, struct machine, rb_node);
1242
1243 next = rb_next(&pos->rb_node);
1244 rb_erase_cached(&pos->rb_node, &machines->guests);
1245 machine__delete(pos);
1246 }
1247}
1248
1249int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1250{
1251 struct machine *machine = machines__findnew(machines, pid);
1252
1253 if (machine == NULL)
1254 return -1;
1255
1256 return machine__create_kernel_maps(machine);
1257}
1258
1259int machine__load_kallsyms(struct machine *machine, const char *filename)
1260{
1261 struct map *map = machine__kernel_map(machine);
1262 struct dso *dso = map__dso(map);
1263 int ret = __dso__load_kallsyms(dso, filename, map, true);
1264
1265 if (ret > 0) {
1266 dso__set_loaded(dso);
1267 /*
1268 * Since /proc/kallsyms will have multiple sections for the
1269 * kernel, with modules between them, fix up the end of all
1270 * sections.
1271 */
1272 maps__fixup_end(machine__kernel_maps(machine));
1273 }
1274
1275 return ret;
1276}
1277
1278int machine__load_vmlinux_path(struct machine *machine)
1279{
1280 struct map *map = machine__kernel_map(machine);
1281 struct dso *dso = map__dso(map);
1282 int ret = dso__load_vmlinux_path(dso, map);
1283
1284 if (ret > 0)
1285 dso__set_loaded(dso);
1286
1287 return ret;
1288}
1289
1290static char *get_kernel_version(const char *root_dir)
1291{
1292 char version[PATH_MAX];
1293 FILE *file;
1294 char *name, *tmp;
1295 const char *prefix = "Linux version ";
1296
1297 sprintf(version, "%s/proc/version", root_dir);
1298 file = fopen(version, "r");
1299 if (!file)
1300 return NULL;
1301
1302 tmp = fgets(version, sizeof(version), file);
1303 fclose(file);
1304 if (!tmp)
1305 return NULL;
1306
1307 name = strstr(version, prefix);
1308 if (!name)
1309 return NULL;
1310 name += strlen(prefix);
1311 tmp = strchr(name, ' ');
1312 if (tmp)
1313 *tmp = '\0';
1314
1315 return strdup(name);
1316}
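
/*
 * Example (illustrative): given a /proc/version line such as
 *
 *	Linux version 6.1.0-13-amd64 (debian-kernel@lists.debian.org) ...
 *
 * this returns a strdup()ed "6.1.0-13-amd64" that the caller must free.
 */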
1317
1318static bool is_kmod_dso(struct dso *dso)
1319{
1320 return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1321 dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
1322}
1323
1324static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1325{
1326 char *long_name;
1327 struct dso *dso;
1328 struct map *map = maps__find_by_name(maps, m->name);
1329
1330 if (map == NULL)
1331 return 0;
1332
1333 long_name = strdup(path);
1334 if (long_name == NULL) {
1335 map__put(map);
1336 return -ENOMEM;
1337 }
1338
1339 dso = map__dso(map);
1340 dso__set_long_name(dso, long_name, true);
1341 dso__kernel_module_get_build_id(dso, "");
1342
1343 /*
1344 * The full name can reveal kmod compression, so update the
1345 * symtab_type if needed (see the example after this function).
1346 */
1347 if (m->comp && is_kmod_dso(dso)) {
1348 dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
1349 dso__set_comp(dso, m->comp);
1350 }
1351 map__put(map);
1352 return 0;
1353}
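
/*
 * Example for the "+1" above (assumes the DSO_BINARY_TYPE enum keeps each
 * *_KMODULE value immediately followed by its *_KMODULE_COMP counterpart):
 * a compressed module moves from DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE to
 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP.
 */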
1354
1355static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1356{
1357 struct dirent *dent;
1358 DIR *dir = opendir(dir_name);
1359 int ret = 0;
1360
1361 if (!dir) {
1362 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1363 return -1;
1364 }
1365
1366 while ((dent = readdir(dir)) != NULL) {
1367 char path[PATH_MAX];
1368 struct stat st;
1369
1370 /* sshfs might return bad dent->d_type, so we have to stat */
1371 path__join(path, sizeof(path), dir_name, dent->d_name);
1372 if (stat(path, &st))
1373 continue;
1374
1375 if (S_ISDIR(st.st_mode)) {
1376 if (!strcmp(dent->d_name, ".") ||
1377 !strcmp(dent->d_name, ".."))
1378 continue;
1379
1380 /* Do not follow top-level source and build symlinks */
1381 if (depth == 0) {
1382 if (!strcmp(dent->d_name, "source") ||
1383 !strcmp(dent->d_name, "build"))
1384 continue;
1385 }
1386
1387 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1388 if (ret < 0)
1389 goto out;
1390 } else {
1391 struct kmod_path m;
1392
1393 ret = kmod_path__parse_name(&m, dent->d_name);
1394 if (ret)
1395 goto out;
1396
1397 if (m.kmod)
1398 ret = maps__set_module_path(maps, path, &m);
1399
1400 zfree(&m.name);
1401
1402 if (ret)
1403 goto out;
1404 }
1405 }
1406
1407out:
1408 closedir(dir);
1409 return ret;
1410}
1411
1412static int machine__set_modules_path(struct machine *machine)
1413{
1414 char *version;
1415 char modules_path[PATH_MAX];
1416
1417 version = get_kernel_version(machine->root_dir);
1418 if (!version)
1419 return -1;
1420
1421 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1422 machine->root_dir, version);
1423 free(version);
1424
1425 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1426}

1427int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1428 u64 *size __maybe_unused,
1429 const char *name __maybe_unused)
1430{
1431 return 0;
1432}
1433
1434static int machine__create_module(void *arg, const char *name, u64 start,
1435 u64 size)
1436{
1437 struct machine *machine = arg;
1438 struct map *map;
1439
1440 if (arch__fix_module_text_start(&start, &size, name) < 0)
1441 return -1;
1442
1443 map = machine__addnew_module_map(machine, start, name);
1444 if (map == NULL)
1445 return -1;
1446 map__set_end(map, start + size);
1447
1448 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1449 map__put(map);
1450 return 0;
1451}
1452
1453static int machine__create_modules(struct machine *machine)
1454{
1455 const char *modules;
1456 char path[PATH_MAX];
1457
1458 if (machine__is_default_guest(machine)) {
1459 modules = symbol_conf.default_guest_modules;
1460 } else {
1461 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1462 modules = path;
1463 }
1464
1465 if (symbol__restricted_filename(modules, "/proc/modules"))
1466 return -1;
1467
1468 if (modules__parse(modules, machine, machine__create_module))
1469 return -1;
1470
1471 if (!machine__set_modules_path(machine))
1472 return 0;
1473
1474 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1475
1476 return 0;
1477}
1478
1479static void machine__set_kernel_mmap(struct machine *machine,
1480 u64 start, u64 end)
1481{
1482 map__set_start(machine->vmlinux_map, start);
1483 map__set_end(machine->vmlinux_map, end);
1484 /*
1485 * Be a bit paranoid here, some perf.data files come with
1486 * a zero sized synthesized MMAP event for the kernel.
1487 */
1488 if (start == 0 && end == 0)
1489 map__set_end(machine->vmlinux_map, ~0ULL);
1490}
1491
1492static int machine__update_kernel_mmap(struct machine *machine,
1493 u64 start, u64 end)
1494{
1495 struct map *orig, *updated;
1496 int err;
1497
1498 orig = machine->vmlinux_map;
1499 updated = map__get(orig);
1500
1501 machine->vmlinux_map = updated;
1502 maps__remove(machine__kernel_maps(machine), orig);
1503 machine__set_kernel_mmap(machine, start, end);
1504 err = maps__insert(machine__kernel_maps(machine), updated);
1505 map__put(orig);
1506
1507 return err;
1508}
1509
1510int machine__create_kernel_maps(struct machine *machine)
1511{
1512 struct dso *kernel = machine__get_kernel(machine);
1513 const char *name = NULL;
1514 u64 start = 0, end = ~0ULL;
1515 int ret;
1516
1517 if (kernel == NULL)
1518 return -1;
1519
1520 ret = __machine__create_kernel_maps(machine, kernel);
1521 if (ret < 0)
1522 goto out_put;
1523
1524 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1525 if (machine__is_host(machine))
1526 pr_debug("Problems creating module maps, "
1527 "continuing anyway...\n");
1528 else
1529 pr_debug("Problems creating module maps for guest %d, "
1530 "continuing anyway...\n", machine->pid);
1531 }
1532
1533 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1534 if (name &&
1535 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1536 machine__destroy_kernel_maps(machine);
1537 ret = -1;
1538 goto out_put;
1539 }
1540
1541 /*
1542 * We have a real start address now, so re-order the kmaps;
1543 * assume it's the last in the kmaps.
1544 */
1545 ret = machine__update_kernel_mmap(machine, start, end);
1546 if (ret < 0)
1547 goto out_put;
1548 }
1549
1550 if (machine__create_extra_kernel_maps(machine, kernel))
1551 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1552
1553 if (end == ~0ULL) {
1554 /* update end address of the kernel map using adjacent module address */
1555 struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1556 machine__kernel_map(machine));
1557
1558 if (next) {
1559 machine__set_kernel_mmap(machine, start, map__start(next));
1560 map__put(next);
1561 }
1562 }
1563
1564out_put:
1565 dso__put(kernel);
1566 return ret;
1567}
1568
1569static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
1570{
1571 return dso__is_kcore(dso) ? 1 : 0;
1572}
1573
1574static bool machine__uses_kcore(struct machine *machine)
1575{
1576 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
1577}
1578
1579static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1580 struct extra_kernel_map *xm)
1581{
1582 return machine__is(machine, "x86_64") &&
1583 is_entry_trampoline(xm->name);
1584}
1585
1586static int machine__process_extra_kernel_map(struct machine *machine,
1587 struct extra_kernel_map *xm)
1588{
1589 struct dso *kernel = machine__kernel_dso(machine);
1590
1591 if (kernel == NULL)
1592 return -1;
1593
1594 return machine__create_extra_kernel_map(machine, kernel, xm);
1595}
1596
1597static int machine__process_kernel_mmap_event(struct machine *machine,
1598 struct extra_kernel_map *xm,
1599 struct build_id *bid)
1600{
1601 enum dso_space_type dso_space;
1602 bool is_kernel_mmap;
1603 const char *mmap_name = machine->mmap_name;
1604
1605 /* If we have maps from kcore then we do not need or want any others */
1606 if (machine__uses_kcore(machine))
1607 return 0;
1608
1609 if (machine__is_host(machine))
1610 dso_space = DSO_SPACE__KERNEL;
1611 else
1612 dso_space = DSO_SPACE__KERNEL_GUEST;
1613
1614 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1615 if (!is_kernel_mmap && !machine__is_host(machine)) {
1616 /*
1617 * If the event was recorded inside the guest and injected into
1618 * the host perf.data file, then it will match a host mmap_name,
1619 * so try that - see machine__set_mmap_name().
1620 */
1621 mmap_name = "[kernel.kallsyms]";
1622 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1623 }
1624 if (xm->name[0] == '/' ||
1625 (!is_kernel_mmap && xm->name[0] == '[')) {
1626 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1627
1628 if (map == NULL)
1629 goto out_problem;
1630
1631 map__set_end(map, map__start(map) + xm->end - xm->start);
1632
1633 if (build_id__is_defined(bid))
1634 dso__set_build_id(map__dso(map), bid);
1635
1636 map__put(map);
1637 } else if (is_kernel_mmap) {
1638 const char *symbol_name = xm->name + strlen(mmap_name);
1639 /*
1640 * Should be there already, from the build-id table in
1641 * the header.
1642 */
1643 struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
1644
1645 if (kernel == NULL)
1646 kernel = machine__findnew_dso(machine, machine->mmap_name);
1647 if (kernel == NULL)
1648 goto out_problem;
1649
1650 dso__set_kernel(kernel, dso_space);
1651 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1652 dso__put(kernel);
1653 goto out_problem;
1654 }
1655
1656 if (strstr(dso__long_name(kernel), "vmlinux"))
1657 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1658
1659 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1660 dso__put(kernel);
1661 goto out_problem;
1662 }
1663
1664 if (build_id__is_defined(bid))
1665 dso__set_build_id(kernel, bid);
1666
1667 /*
1668 * Avoid using a zero address (kptr_restrict) for the ref reloc
1669 * symbol. Effectively having zero here means that at record
1670 * time /proc/sys/kernel/kptr_restrict was non zero.
1671 */
1672 if (xm->pgoff != 0) {
1673 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1674 symbol_name,
1675 xm->pgoff);
1676 }
1677
1678 if (machine__is_default_guest(machine)) {
1679 /*
1680 * preload dso of guest kernel and modules
1681 */
1682 dso__load(kernel, machine__kernel_map(machine));
1683 }
1684 dso__put(kernel);
1685 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1686 return machine__process_extra_kernel_map(machine, xm);
1687 }
1688 return 0;
1689out_problem:
1690 return -1;
1691}
1692
1693int machine__process_mmap2_event(struct machine *machine,
1694 union perf_event *event,
1695 struct perf_sample *sample)
1696{
1697 struct thread *thread;
1698 struct map *map;
1699 struct dso_id dso_id = {
1700 .maj = event->mmap2.maj,
1701 .min = event->mmap2.min,
1702 .ino = event->mmap2.ino,
1703 .ino_generation = event->mmap2.ino_generation,
1704 };
1705 struct build_id __bid, *bid = NULL;
1706 int ret = 0;
1707
1708 if (dump_trace)
1709 perf_event__fprintf_mmap2(event, stdout);
1710
1711 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1712 bid = &__bid;
1713 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1714 }
1715
1716 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1717 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1718 struct extra_kernel_map xm = {
1719 .start = event->mmap2.start,
1720 .end = event->mmap2.start + event->mmap2.len,
1721 .pgoff = event->mmap2.pgoff,
1722 };
1723
1724 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1725 ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1726 if (ret < 0)
1727 goto out_problem;
1728 return 0;
1729 }
1730
1731 thread = machine__findnew_thread(machine, event->mmap2.pid,
1732 event->mmap2.tid);
1733 if (thread == NULL)
1734 goto out_problem;
1735
1736 map = map__new(machine, event->mmap2.start,
1737 event->mmap2.len, event->mmap2.pgoff,
1738 &dso_id, event->mmap2.prot,
1739 event->mmap2.flags, bid,
1740 event->mmap2.filename, thread);
1741
1742 if (map == NULL)
1743 goto out_problem_map;
1744
1745 ret = thread__insert_map(thread, map);
1746 if (ret)
1747 goto out_problem_insert;
1748
1749 thread__put(thread);
1750 map__put(map);
1751 return 0;
1752
1753out_problem_insert:
1754 map__put(map);
1755out_problem_map:
1756 thread__put(thread);
1757out_problem:
1758 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1759 return 0;
1760}
1761
1762int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1763 struct perf_sample *sample)
1764{
1765 struct thread *thread;
1766 struct map *map;
1767 u32 prot = 0;
1768 int ret = 0;
1769
1770 if (dump_trace)
1771 perf_event__fprintf_mmap(event, stdout);
1772
1773 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1774 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1775 struct extra_kernel_map xm = {
1776 .start = event->mmap.start,
1777 .end = event->mmap.start + event->mmap.len,
1778 .pgoff = event->mmap.pgoff,
1779 };
1780
1781 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1782 ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1783 if (ret < 0)
1784 goto out_problem;
1785 return 0;
1786 }
1787
1788 thread = machine__findnew_thread(machine, event->mmap.pid,
1789 event->mmap.tid);
1790 if (thread == NULL)
1791 goto out_problem;
1792
1793 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1794 prot = PROT_EXEC;
1795
1796 map = map__new(machine, event->mmap.start,
1797 event->mmap.len, event->mmap.pgoff,
1798 NULL, prot, 0, NULL, event->mmap.filename, thread);
1799
1800 if (map == NULL)
1801 goto out_problem_map;
1802
1803 ret = thread__insert_map(thread, map);
1804 if (ret)
1805 goto out_problem_insert;
1806
1807 thread__put(thread);
1808 map__put(map);
1809 return 0;
1810
1811out_problem_insert:
1812 map__put(map);
1813out_problem_map:
1814 thread__put(thread);
1815out_problem:
1816 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1817 return 0;
1818}
1819
1820void machine__remove_thread(struct machine *machine, struct thread *th)
1821{
1822 return threads__remove(&machine->threads, th);
1823}
1824
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (thread__pid(parent) != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    thread__pid(parent), thread__tid(parent));
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

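/*
 * Handle a PERF_RECORD_EXIT event: either mark the thread as exited (so
 * late-arriving samples can still be attributed to it) or drop it from the
 * machine entirely, depending on symbol_conf.keep_exited_threads.
 */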
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		if (symbol_conf.keep_exited_threads)
			thread__set_exited(thread, /*exited=*/true);
		else
			machine__remove_thread(machine, thread);
	}
	thread__put(thread);
	return 0;
}

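/*
 * Dispatch a single perf event to the matching machine__process_*() handler
 * based on event->header.type; unknown types are reported as an error (-1).
 */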
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_CGROUP:
		ret = machine__process_cgroup_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	case PERF_RECORD_TEXT_POKE:
		ret = machine__process_text_poke(machine, event, sample); break;
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		ret = machine__process_aux_output_hw_id_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	return regexec(regex, sym->name, 0, NULL, 0) == 0;
}

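/*
 * Resolve one branch stack address into an addr_map_symbol, taking
 * references on the maps/map stored in the result. Resolution probes each
 * cpumode in turn; see the comment in the body for why the header.misc hint
 * cannot be used here.
 */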
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	addr_location__init(&al);
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match;
	 * otherwise the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->al_level = al.level;
	ams->ms.maps = maps__get(al.maps);
	ams->ms.sym = al.sym;
	ams->ms.map = map__get(al.map);
	ams->phys_addr = 0;
	ams->data_page_size = 0;
	addr_location__exit(&al);
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr, u64 daddr_page_size)
{
	struct addr_location al;

	addr_location__init(&al);

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->al_level = al.level;
	ams->ms.maps = maps__get(al.maps);
	ams->ms.sym = al.sym;
	ams->ms.map = map__get(al.map);
	ams->phys_addr = phys_addr;
	ams->data_page_size = daddr_page_size;
	addr_location__exit(&al);
}

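/*
 * Build a mem_info for a memory access sample: the instruction address is
 * resolved via the cpumode probing in ip__resolve_ams(), the data address
 * with the sample's own cpumode, and the raw data_src bits are copied over.
 * The caller is responsible for releasing the returned mem_info.
 */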
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
	ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
			 sample->addr, sample->phys_addr,
			 sample->data_page_size);
	mem_info__data_src(mi)->val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;
	struct dso *dso;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	dso = map__dso(map);
	srcline = srcline__tree_find(dso__srclines(dso), ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(dso__srclines(dso), ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

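/*
 * Resolve one callchain entry and append it to the cursor. Addresses at or
 * above PERF_CONTEXT_MAX are context markers that switch the current
 * cpumode instead of producing a cursor node; an unknown marker is treated
 * as corruption and resets the whole cursor. Returns 0 on success, 1 when
 * the chain was discarded, or a negative error from
 * callchain_cursor_append().
 */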
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from,
			    bool symbols)
{
	struct map_symbol ms = {};
	struct addr_location al;
	int nr_loop_iter = 0, err = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	addr_location__init(&al);
	al.filtered = 0;
	al.sym = NULL;
	al.srcline = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				err = 1;
				goto out;
			}
			goto out;
		}
		if (symbols)
			thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			addr_location__copy(root_al, &al);
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = maps__get(al.maps);
	ms.map = map__get(al.map);
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	err = callchain_cursor_append(cursor, ip, &ms,
				      branch, flags, nr_loop_iter,
				      iter_cycles, branch_from, srcline);
out:
	addr_location__exit(&al);
	map_symbol__exit(&ms);
	return err;
}

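/*
 * Resolve every entry of the sample's branch stack into from/to
 * addr_map_symbols, carrying the branch flags (and the branch counters,
 * when present) along. Returns a calloc()ed array of bs->nr entries that
 * the caller must free.
 */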
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 *branch_stack_cntr = sample->branch_stack_cntr;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
		bi[i].flags = entries[i].flags;
		if (branch_stack_cntr)
			bi[i].branch_stack_cntr = branch_stack_cntr[i];
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/*
 * Remove loops from a branch history: when a run of branch sources repeats
 * (e.g. A B A B C collapses to A B C), the repeated iterations are folded
 * away and accounted in iter[] via save_iterations(). Returns the new
 * number of entries. Hash collisions are not handled, so some loops may be
 * missed.
 */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}

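/*
 * Append the kernel part of the sample's callchain (entries 0..end) to the
 * cursor, in callee or caller order. The LBR only covers the user
 * callchain, so kernel entries always come from the regular fp callchain.
 */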
static int lbr_callchain_add_kernel_ip(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct perf_sample *sample,
				       struct symbol **parent,
				       struct addr_location *root_al,
				       u64 branch_from,
				       bool callee, int end,
				       bool symbols)
{
	struct ip_callchain *chain = sample->callchain;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int err, i;

	if (callee) {
		for (i = 0; i < end + 1; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, chain->ips[i],
					       false, NULL, NULL, branch_from,
					       symbols);
			if (err)
				return err;
		}
		return 0;
	}

	for (i = end; i >= 0; i--) {
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, chain->ips[i],
				       false, NULL, NULL, branch_from,
				       symbols);
		if (err)
			return err;
	}

	return 0;
}

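/*
 * Remember the cursor node that was just appended for LBR entry @idx in
 * lbr_stitch->prev_lbr_cursor[], so that a later sample can splice
 * ("stitch") these nodes back in. The map_symbol references are re-taken
 * after the memcpy() because the cursor node itself is reused across
 * samples.
 */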
static void save_lbr_cursor_node(struct thread *thread,
				 struct callchain_cursor *cursor,
				 int idx)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);

	if (!lbr_stitch)
		return;

	if (cursor->pos == cursor->nr) {
		lbr_stitch->prev_lbr_cursor[idx].valid = false;
		return;
	}

	if (!cursor->curr)
		cursor->curr = cursor->first;
	else
		cursor->curr = cursor->curr->next;

	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
	       sizeof(struct callchain_cursor_node));
	lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
	lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);

	lbr_stitch->prev_lbr_cursor[idx].valid = true;
	cursor->pos++;
}

static int lbr_callchain_add_lbr_ip(struct thread *thread,
				    struct callchain_cursor *cursor,
				    struct perf_sample *sample,
				    struct symbol **parent,
				    struct addr_location *root_al,
				    u64 *branch_from,
				    bool callee,
				    bool symbols)
{
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int lbr_nr = lbr_stack->nr;
	struct branch_flags *flags;
	int err, i;
	u64 ip;

	/*
	 * curr and pos are not used during a writing session; they are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed. Use curr and pos here to track the current cursor node.
	 */
	if (thread__lbr_stitch(thread)) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add the LBR ip from the first entry's 'to' field. */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;

		/*
		 * The number of cursor nodes has increased, so move the
		 * current cursor node along. There is no need to save the
		 * cursor node for entry 0, since it is impossible to stitch
		 * the whole LBR stack of the previous sample.
		 */
		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add the LBR ips from the entries' 'from' fields, one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from, symbols);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add the LBR ips from the entries' 'from' fields, one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	if (lbr_nr > 0) {
		/* Add the LBR ip from the first entry's 'to' field. */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;
	}

	return 0;
}

static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}

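/*
 * Get a stitch_list node, reusing one from the per-thread free list when
 * possible and falling back to malloc() otherwise.
 */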
static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}

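/*
 * Decide whether the previous sample's LBR stack can be stitched onto the
 * current one: the two stacks must overlap in identical entries at the same
 * physical LBR register indices (computed from hw_idx). When they do, the
 * non-overlapping tail of the previous stack is queued on
 * lbr_stitch->lists for lbr_callchain_add_stitched_lbr_ip().
 */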
static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct branch_stack *cur_stack = cur->branch_stack;
	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
	struct branch_stack *prev_stack = prev->branch_stack;
	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	int i, j, nr_identical_branches = 0;
	struct stitch_list *stitch_node;
	u64 cur_base, distance;

	if (!cur_stack || !prev_stack)
		return false;

	/* Find the physical index of the base-of-stack for the current sample. */
	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;

	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
						     (max_lbr + prev_stack->hw_idx - cur_base);
	/* The previous sample has a shorter stack. Nothing can be stitched. */
	if (distance + 1 > prev_stack->nr)
		return false;

	/*
	 * Check if there are identical LBRs between the two samples.
	 * Identical LBRs must have the same from, to and flags values. Also,
	 * they have to be saved in the same LBR registers (same physical
	 * index).
	 *
	 * Start from the base-of-stack of the current sample.
	 */
	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
		if ((prev_entries[i].from != cur_entries[j].from) ||
		    (prev_entries[i].to != cur_entries[j].to) ||
		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
			break;
		nr_identical_branches++;
	}

	if (!nr_identical_branches)
		return false;

	/*
	 * Save the LBRs between the base-of-stack of the previous sample
	 * and the base-of-stack of the current sample into lbr_stitch->lists.
	 * These LBRs will be stitched later.
	 */
	for (i = prev_stack->nr - 1; i > (int)distance; i--) {

		if (!lbr_stitch->prev_lbr_cursor[i].valid)
			continue;

		stitch_node = get_stitch_node(thread);
		if (!stitch_node)
			return false;

		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
		       sizeof(struct callchain_cursor_node));

		stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
		stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);

		if (callee)
			list_add(&stitch_node->node, &lbr_stitch->lists);
		else
			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
	}

	return true;
}

static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
	if (thread__lbr_stitch(thread))
		return true;

	thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
	if (!thread__lbr_stitch(thread))
		goto err;

	thread__lbr_stitch(thread)->prev_lbr_cursor =
		calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
		goto free_lbr_stitch;

	thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;

	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);

	return true;

free_lbr_stitch:
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
err:
	pr_warning("Failed to allocate space for stitched LBRs. Disabling LBR stitching.\n");
	thread__set_lbr_stitch_enable(thread, false);
	return false;
}

/*
 * Resolve an LBR callstack chain sample.
 * Return:
 *   1 on success, i.e. LBR callchain information was resolved
 *   0 when no LBR callchain information is available; the caller should
 *     fall back to the fp callchain
 *   a negative error code on other errors
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack,
					unsigned int max_lbr,
					bool symbols)
{
	bool callee = (callchain_param.order == ORDER_CALLEE);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	struct lbr_stitch *lbr_stitch;
	bool stitched_lbr = false;
	u64 branch_from = 0;
	int err;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i == chain_nr)
		return 0;

	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
		lbr_stitch = thread__lbr_stitch(thread);

		stitched_lbr = has_stitched_lbr(thread, sample,
						&lbr_stitch->prev_sample,
						max_lbr, callee);

		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
			struct stitch_list *stitch_node;

			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
				map_symbol__exit(&stitch_node->cursor.ms);

			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
		}
		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
	}

	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i, symbols);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true, symbols);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}

	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false, symbols);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i, symbols);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ? err : 0;
}

static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent, bool symbols)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0, symbols);
			break;
		}
	}
	return err;
}

static u64 get_leaf_frame_caller(struct perf_sample *sample,
				 struct thread *thread, int usr_idx)
{
	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
	else
		return 0;
}

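/*
 * Resolve a sample's callchain into cursor entries. Tries the LBR
 * call-stack first when the evsel recorded one, then optionally folds the
 * branch stack into the chain (callchain_param.branch_callstack) before
 * walking the fp callchain itself, honouring callchain_param.order and
 * arch-specific skipped entries.
 */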
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack,
					    bool symbols)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries, usr_idx;
	int skip_idx = -1;
	int first_call = 0;
	u64 leaf_frame_caller;

	if (chain)
		chain_nr = chain->nr;

	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches,
						   symbols);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to the call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not
				 * longer than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from, symbols);

			if (!err) {
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0, symbols);
			}
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call, symbols);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j, symbols);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		/*
		 * PERF_CONTEXT_USER allows us to locate where the user stack
		 * ends. Depending on callchain_param.order and the position of
		 * PERF_CONTEXT_USER, the index will be different in order to
		 * add the missing frame at the right place.
		 */

		usr_idx = callchain_param.order == ORDER_CALLEE ? j - 2 : j - 1;

		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {

			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);

			/*
			 * Check that leaf_frame_caller != ip so we do not add
			 * the same value twice.
			 */

			if (leaf_frame_caller && leaf_frame_caller != ip) {

				err = add_callchain_ip(thread, cursor, parent,
						       root_al, &cpumode, leaf_frame_caller,
						       false, NULL, NULL, 0, symbols);
				if (err)
					return (err < 0) ? err : 0;
			}
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0, symbols);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

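/*
 * Append the inline frames for @ip to the cursor, using (and populating)
 * the DSO's inlined-nodes cache. Returns 0 when inline frames were
 * appended, non-zero when the caller should fall back to appending the
 * plain entry itself.
 */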
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	struct dso *dso;
	u64 addr;
	int ret = 1;
	struct map_symbol ilist_ms;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__dso_map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);
	dso = map__dso(map);

	inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
	}

	ilist_ms = (struct map_symbol) {
		.maps = maps__get(ms->maps),
		.map = map__get(map),
	};
	list_for_each_entry(ilist, &inline_node->val, list) {
		ilist_ms.sym = ilist->symbol;
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		/* Drop the maps/map references even on error. */
		if (ret != 0)
			break;
	}
	map_symbol__exit(&ilist_ms);

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__dso_map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack, bool symbols)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	if (!symbols)
		pr_debug("Skipping symbol resolution with an unwinder isn't currently supported\n");

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack, false);
}

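/*
 * Resolve a sample's full callchain into the cursor, combining the
 * kernel-recorded callchain (fp or LBR) with optional DWARF post-unwinding
 * of the captured user stack; the two passes run in an order matching
 * callchain_param.order.
 */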
int __thread__resolve_callchain(struct thread *thread,
				struct callchain_cursor *cursor,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct symbol **parent,
				struct addr_location *root_al,
				int max_stack,
				bool symbols)
{
	int ret = 0;

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	return threads__for_each_thread(&machine->threads, fn, priv);
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

static int thread_list_cb(struct thread *thread, void *data)
{
	struct list_head *list = data;
	struct thread_list *entry = malloc(sizeof(*entry));

	if (!entry)
		return -ENOMEM;

	entry->thread = thread__get(thread);
	list_add_tail(&entry->list, list);
	return 0;
}

int machine__thread_list(struct machine *machine, struct list_head *list)
{
	return machine__for_each_thread(machine, thread_list_cb, list);
}

void thread_list__delete(struct list_head *list)
{
	struct thread_list *pos, *next;

	list_for_each_entry_safe(pos, next, list, list) {
		thread__zput(pos->thread);
		list_del(&pos->list);
		free(pos);
	}
}

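/*
 * Typical usage of the two helpers above (an illustrative sketch, not code
 * from this file; 'list' and 'pos' are local names chosen for the example).
 * Note the list must be deleted even on failure, since it may have been
 * partially filled:
 *
 *	LIST_HEAD(list);
 *	struct thread_list *pos;
 *
 *	if (machine__thread_list(machine, &list) == 0) {
 *		list_for_each_entry(pos, &list, list)
 *			pr_debug("tid %d\n", thread__tid(pos->thread));
 *	}
 *	thread_list__delete(&list);
 */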
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	const pid_t init_val = -1;

	if (cpu < 0)
		return -EINVAL;

	if (realloc_array_as_needed(machine->current_tid,
				    machine->current_tid_sz,
				    (unsigned int)cpu,
				    &init_val))
		return -ENOMEM;

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread__set_cpu(thread, cpu);
	thread__put(thread);

	return 0;
}

/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() or
 * machine__normalized_is() if a normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

bool machine__normalized_is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map__start(map);
	}
	return err;
}

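/*
 * On machines with a single kernel/user address space, the cpumode recorded
 * for a sample may not match the cpumode of a referenced address; re-derive
 * it from the address itself, preserving the host/guest distinction.
 */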
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}

struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
				    const struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
	*addrp = map__unmap_ip(map, sym->start);
	return sym->name;
}

struct machine__for_each_dso_cb_args {
	struct machine *machine;
	machine__dso_t fn;
	void *priv;
};

static int machine__for_each_dso_cb(struct dso *dso, void *data)
{
	struct machine__for_each_dso_cb_args *args = data;

	return args->fn(dso, args->machine, args->priv);
}

int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
	struct machine__for_each_dso_cb_args args = {
		.machine = machine,
		.fn = fn,
		.priv = priv,
	};

	return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
}

int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
	struct maps *maps = machine__kernel_maps(machine);

	return maps__for_each_map(maps, fn, priv);
}

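/*
 * Report whether @addr falls inside one of the kernel text ranges that
 * implement locking (sched text for mutexes/rwsems, lock text for
 * spinlocks, plus the contention tracepoint entry points). The range
 * boundaries are looked up from kernel symbols once and cached on the
 * machine; sched.text_start == 1 marks a failed lookup so it is not
 * retried.
 */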
bool machine__is_lock_function(struct machine *machine, u64 addr)
{
	if (!machine->sched.text_start) {
		struct map *kmap;
		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);

		if (!sym) {
			/* to avoid retry */
			machine->sched.text_start = 1;
			return false;
		}

		machine->sched.text_start = map__unmap_ip(kmap, sym->start);

		/* should not fail from here */
		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
		machine->sched.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
		machine->lock.text_start = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
		machine->lock.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
		if (sym) {
			machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
			machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
		}
		sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
		if (sym) {
			machine->trace.text_start = map__unmap_ip(kmap, sym->start);
			machine->trace.text_end = map__unmap_ip(kmap, sym->end);
		}
	}

	/* failed to get kernel symbols */
	if (machine->sched.text_start == 1)
		return false;

	/* mutex and rwsem functions are in the sched text section */
	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
		return true;

	/* spinlock functions are in the lock text section */
	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
		return true;

	/*
	 * traceiter functions currently don't have their own section,
	 * but we consider them lock functions.
	 */
	if (machine->traceiter.text_start != 0) {
		if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
			return true;
	}

	if (machine->trace.text_start != 0) {
		if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
			return true;
	}

	return false;
}

int machine__hit_all_dsos(struct machine *machine)
{
	return dsos__hit_all(&machine->dsos);
}