// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_MMAP2] = "MMAP2",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_AUX] = "AUX",
	[PERF_RECORD_ITRACE_START] = "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
	[PERF_RECORD_SWITCH] = "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES] = "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX] = "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE] = "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
	[PERF_RECORD_CPU_MAP] = "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
	[PERF_RECORD_STAT] = "STAT",
	[PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV] = "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
};

static const char *perf_ns__names[] = {
	[NET_NS_INDEX] = "net",
	[UTS_NS_INDEX] = "uts",
	[IPC_NS_INDEX] = "ipc",
	[PID_NS_INDEX] = "pid",
	[USER_NS_INDEX] = "user",
	[MNT_NS_INDEX] = "mnt",
	[CGROUP_NS_INDEX] = "cgroup",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static const char *perf_ns__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_ns__names))
		return "UNKNOWN";
	return perf_ns__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid = -1,
		.tid = -1,
		.time = -1,
		.stream_id = -1,
		.cpu = -1,
		.period = 1,
		.cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name += 5; /* strlen("Name:") */
		name = ltrim(name);

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5; /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5; /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}
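
/*
 * For reference, the part of /proc/<pid>/status that the parser above picks
 * apart looks like this (example values):
 *
 *	Name:	cat
 *	...
 *	Tgid:	4082
 *	...
 *	PPid:	3990
 *
 * Only the "Name:", "Tgid:" and "PPid:" lines are consumed: the whitespace
 * after "Name:" is trimmed with ltrim(), and atoi() skips the leading tab
 * and stops at the newline.
 */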

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}
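
/*
 * The /proc/<pid>/ns/* entries are symlinks whose inode numbers identify
 * the namespace instances, e.g. (example value):
 *
 *	$ readlink /proc/self/ns/net
 *	net:[4026531992]
 *
 * stat64() on such a link yields that same number in st_ino, which is what
 * gets recorded in the PERF_RECORD_NAMESPACES event, together with st_dev.
 */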

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}
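
/*
 * Worked example (values from the sample line in the comment above): for
 *
 *	00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat
 *
 * the sscanf() fills in start = 0x400000, len = 0x40c000 (at this point the
 * end address), pgoff = 0, maj:min = 0xfd:0x01, ino = 41038 and execname =
 * "/bin/cat"; prot becomes PROT_READ | PROT_EXEC, flags MAP_PRIVATE, and the
 * later "len -= start" turns len into the real length, 0xc000.
 */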

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    unsigned int proc_map_timeout,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	unsigned int proc_map_timeout;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->proc_map_timeout, args->dirent,
					 args->start, args->num);
	return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       proc_map_timeout,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].proc_map_timeout = proc_map_timeout;
		args[i].dirent = dirent;
	}
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		free(dirent[i]);
	free(dirent);

	return err;
}
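
/*
 * Worked example of the work split above (illustrative numbers): with
 * n = 10 /proc entries and thread_nr = 4, num_per_thread = 2 and m = 2, so
 * the first two workers get 3 entries each (start 0 and 3), base becomes 6,
 * and the remaining two workers get 2 entries each (start 6 and 8) - all 10
 * entries are covered exactly once.
 */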

struct process_symbol_args {
	const char *name;
	u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' alias to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}
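
/*
 * Usage sketch (illustrative, error handling elided): looking up the start
 * of the kernel text section from the running kernel's symbol list:
 *
 *	u64 text_start;
 *
 *	if (kallsyms__get_function_start("/proc/kallsyms", "_text",
 *					 &text_start) == 0)
 *		pr_debug("_text is at %#" PRIx64 "\n", text_start);
 *
 * This is the kind of lookup behind the ref_reloc_sym used by
 * perf_event__synthesize_kernel_mmap() below.
 */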

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and after it becomes available, use it as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}
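
/*
 * For the host machine the resulting event's filename is typically (example
 * values) "[kernel.kallsyms]_text" - machine->mmap_name followed by the
 * reference relocation symbol - with pgoff carrying that symbol's address,
 * so the tools can later re-relocate kernel symbols if the recorded kernel
 * was loaded at a different address.
 */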

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}
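
/*
 * Worked example (illustrative): a dense map {0..255} costs 256 * sizeof(u16)
 * = 512 payload bytes as an array but only BITS_TO_LONGS(256) = 4 longs (32
 * bytes on 64-bit) as a mask, so the mask form wins; a sparse map like
 * {0, 1023} needs just two u16 entries as an array but 16 longs as a mask,
 * so the array form wins. The smaller of the two is what gets synthesized.
 */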

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE, config->aggr_mode)
	ADD(INTERVAL, config->interval)
	ADD(SCALE, config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
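
/*
 * The resulting record is a simple tag/value list; a synthesized event for,
 * say, aggr_mode = AGGR_CORE, interval = 1000 and scale = 1 carries nr = 3
 * and the (tag, val) pairs (AGGR_MODE, AGGR_CORE), (INTERVAL, 1000) and
 * (SCALE, 1), which perf_event__read_stat_config() below unpacks back into
 * a struct perf_stat_config on the consuming side.
 */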

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id = id;
	event.cpu = cpu;
	event.thread = thread;
	event.val = count->val;
	event.ena = count->ena;
	event.run = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE, scale)
		CASE(INTERVAL, interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
			       perf_ns__name(idx), (u64)ns_link_info[idx].dev,
			       (u64)ns_link_info[idx].ino,
			       ((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = !out ? "IN " :
		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
		"OUT " : "OUT preempt";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;
	al->srcline = NULL;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts for the
 * entries in the addr_location it fills in; when done using them (and
 * perhaps after taking extra ref counts to keep a pointer to one of those
 * entries), it must be paired with addr_location__put(), so that the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}
1#include <linux/types.h>
2#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
3#include <api/fs/fs.h>
4#include "event.h"
5#include "debug.h"
6#include "hist.h"
7#include "machine.h"
8#include "sort.h"
9#include "string.h"
10#include "strlist.h"
11#include "thread.h"
12#include "thread_map.h"
13#include "symbol/kallsyms.h"
14#include "asm/bug.h"
15#include "stat.h"
16
17static const char *perf_event__names[] = {
18 [0] = "TOTAL",
19 [PERF_RECORD_MMAP] = "MMAP",
20 [PERF_RECORD_MMAP2] = "MMAP2",
21 [PERF_RECORD_LOST] = "LOST",
22 [PERF_RECORD_COMM] = "COMM",
23 [PERF_RECORD_EXIT] = "EXIT",
24 [PERF_RECORD_THROTTLE] = "THROTTLE",
25 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
26 [PERF_RECORD_FORK] = "FORK",
27 [PERF_RECORD_READ] = "READ",
28 [PERF_RECORD_SAMPLE] = "SAMPLE",
29 [PERF_RECORD_AUX] = "AUX",
30 [PERF_RECORD_ITRACE_START] = "ITRACE_START",
31 [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
32 [PERF_RECORD_SWITCH] = "SWITCH",
33 [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
34 [PERF_RECORD_HEADER_ATTR] = "ATTR",
35 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
36 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
37 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
38 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
39 [PERF_RECORD_ID_INDEX] = "ID_INDEX",
40 [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
41 [PERF_RECORD_AUXTRACE] = "AUXTRACE",
42 [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
43 [PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
44 [PERF_RECORD_CPU_MAP] = "CPU_MAP",
45 [PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
46 [PERF_RECORD_STAT] = "STAT",
47 [PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
48 [PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
49 [PERF_RECORD_TIME_CONV] = "TIME_CONV",
50};
51
52const char *perf_event__name(unsigned int id)
53{
54 if (id >= ARRAY_SIZE(perf_event__names))
55 return "INVALID";
56 if (!perf_event__names[id])
57 return "UNKNOWN";
58 return perf_event__names[id];
59}
60
61static int perf_tool__process_synth_event(struct perf_tool *tool,
62 union perf_event *event,
63 struct machine *machine,
64 perf_event__handler_t process)
65{
66 struct perf_sample synth_sample = {
67 .pid = -1,
68 .tid = -1,
69 .time = -1,
70 .stream_id = -1,
71 .cpu = -1,
72 .period = 1,
73 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
74 };
75
76 return process(tool, event, &synth_sample, machine);
77};
78
79/*
80 * Assumes that the first 4095 bytes of /proc/pid/stat contains
81 * the comm, tgid and ppid.
82 */
83static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
84 pid_t *tgid, pid_t *ppid)
85{
86 char filename[PATH_MAX];
87 char bf[4096];
88 int fd;
89 size_t size = 0;
90 ssize_t n;
91 char *nl, *name, *tgids, *ppids;
92
93 *tgid = -1;
94 *ppid = -1;
95
96 snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
97
98 fd = open(filename, O_RDONLY);
99 if (fd < 0) {
100 pr_debug("couldn't open %s\n", filename);
101 return -1;
102 }
103
104 n = read(fd, bf, sizeof(bf) - 1);
105 close(fd);
106 if (n <= 0) {
107 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
108 pid);
109 return -1;
110 }
111 bf[n] = '\0';
112
113 name = strstr(bf, "Name:");
114 tgids = strstr(bf, "Tgid:");
115 ppids = strstr(bf, "PPid:");
116
117 if (name) {
118 name += 5; /* strlen("Name:") */
119
120 while (*name && isspace(*name))
121 ++name;
122
123 nl = strchr(name, '\n');
124 if (nl)
125 *nl = '\0';
126
127 size = strlen(name);
128 if (size >= len)
129 size = len - 1;
130 memcpy(comm, name, size);
131 comm[size] = '\0';
132 } else {
133 pr_debug("Name: string not found for pid %d\n", pid);
134 }
135
136 if (tgids) {
137 tgids += 5; /* strlen("Tgid:") */
138 *tgid = atoi(tgids);
139 } else {
140 pr_debug("Tgid: string not found for pid %d\n", pid);
141 }
142
143 if (ppids) {
144 ppids += 5; /* strlen("PPid:") */
145 *ppid = atoi(ppids);
146 } else {
147 pr_debug("PPid: string not found for pid %d\n", pid);
148 }
149
150 return 0;
151}
152
153static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
154 struct machine *machine,
155 pid_t *tgid, pid_t *ppid)
156{
157 size_t size;
158
159 *ppid = -1;
160
161 memset(&event->comm, 0, sizeof(event->comm));
162
163 if (machine__is_host(machine)) {
164 if (perf_event__get_comm_ids(pid, event->comm.comm,
165 sizeof(event->comm.comm),
166 tgid, ppid) != 0) {
167 return -1;
168 }
169 } else {
170 *tgid = machine->pid;
171 }
172
173 if (*tgid < 0)
174 return -1;
175
176 event->comm.pid = *tgid;
177 event->comm.header.type = PERF_RECORD_COMM;
178
179 size = strlen(event->comm.comm) + 1;
180 size = PERF_ALIGN(size, sizeof(u64));
181 memset(event->comm.comm + size, 0, machine->id_hdr_size);
182 event->comm.header.size = (sizeof(event->comm) -
183 (sizeof(event->comm.comm) - size) +
184 machine->id_hdr_size);
185 event->comm.tid = pid;
186
187 return 0;
188}
189
190pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191 union perf_event *event, pid_t pid,
192 perf_event__handler_t process,
193 struct machine *machine)
194{
195 pid_t tgid, ppid;
196
197 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
198 return -1;
199
200 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
201 return -1;
202
203 return tgid;
204}
205
206static int perf_event__synthesize_fork(struct perf_tool *tool,
207 union perf_event *event,
208 pid_t pid, pid_t tgid, pid_t ppid,
209 perf_event__handler_t process,
210 struct machine *machine)
211{
212 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
213
214 /*
215 * for main thread set parent to ppid from status file. For other
216 * threads set parent pid to main thread. ie., assume main thread
217 * spawns all threads in a process
218 */
219 if (tgid == pid) {
220 event->fork.ppid = ppid;
221 event->fork.ptid = ppid;
222 } else {
223 event->fork.ppid = tgid;
224 event->fork.ptid = tgid;
225 }
226 event->fork.pid = tgid;
227 event->fork.tid = pid;
228 event->fork.header.type = PERF_RECORD_FORK;
229
230 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
231
232 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
233 return -1;
234
235 return 0;
236}
237
238int perf_event__synthesize_mmap_events(struct perf_tool *tool,
239 union perf_event *event,
240 pid_t pid, pid_t tgid,
241 perf_event__handler_t process,
242 struct machine *machine,
243 bool mmap_data,
244 unsigned int proc_map_timeout)
245{
246 char filename[PATH_MAX];
247 FILE *fp;
248 unsigned long long t;
249 bool truncation = false;
250 unsigned long long timeout = proc_map_timeout * 1000000ULL;
251 int rc = 0;
252 const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
253 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
254
255 if (machine__is_default_guest(machine))
256 return 0;
257
258 snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
259 machine->root_dir, pid);
260
261 fp = fopen(filename, "r");
262 if (fp == NULL) {
263 /*
264 * We raced with a task exiting - just return:
265 */
266 pr_debug("couldn't open %s\n", filename);
267 return -1;
268 }
269
270 event->header.type = PERF_RECORD_MMAP2;
271 t = rdclock();
272
273 while (1) {
274 char bf[BUFSIZ];
275 char prot[5];
276 char execname[PATH_MAX];
277 char anonstr[] = "//anon";
278 unsigned int ino;
279 size_t size;
280 ssize_t n;
281
282 if (fgets(bf, sizeof(bf), fp) == NULL)
283 break;
284
285 if ((rdclock() - t) > timeout) {
286 pr_warning("Reading %s time out. "
287 "You may want to increase "
288 "the time limit by --proc-map-timeout\n",
289 filename);
290 truncation = true;
291 goto out;
292 }
293
294 /* ensure null termination since stack will be reused. */
295 strcpy(execname, "");
296
297 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
298 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
299 &event->mmap2.start, &event->mmap2.len, prot,
300 &event->mmap2.pgoff, &event->mmap2.maj,
301 &event->mmap2.min,
302 &ino, execname);
303
304 /*
305 * Anon maps don't have the execname.
306 */
307 if (n < 7)
308 continue;
309
310 event->mmap2.ino = (u64)ino;
311
312 /*
313 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
314 */
315 if (machine__is_host(machine))
316 event->header.misc = PERF_RECORD_MISC_USER;
317 else
318 event->header.misc = PERF_RECORD_MISC_GUEST_USER;
319
320 /* map protection and flags bits */
321 event->mmap2.prot = 0;
322 event->mmap2.flags = 0;
323 if (prot[0] == 'r')
324 event->mmap2.prot |= PROT_READ;
325 if (prot[1] == 'w')
326 event->mmap2.prot |= PROT_WRITE;
327 if (prot[2] == 'x')
328 event->mmap2.prot |= PROT_EXEC;
329
330 if (prot[3] == 's')
331 event->mmap2.flags |= MAP_SHARED;
332 else
333 event->mmap2.flags |= MAP_PRIVATE;
334
335 if (prot[2] != 'x') {
336 if (!mmap_data || prot[0] != 'r')
337 continue;
338
339 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
340 }
341
342out:
343 if (truncation)
344 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
345
346 if (!strcmp(execname, ""))
347 strcpy(execname, anonstr);
348
349 if (hugetlbfs_mnt_len &&
350 !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
351 strcpy(execname, anonstr);
352 event->mmap2.flags |= MAP_HUGETLB;
353 }
354
355 size = strlen(execname) + 1;
356 memcpy(event->mmap2.filename, execname, size);
357 size = PERF_ALIGN(size, sizeof(u64));
358 event->mmap2.len -= event->mmap.start;
359 event->mmap2.header.size = (sizeof(event->mmap2) -
360 (sizeof(event->mmap2.filename) - size));
361 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
362 event->mmap2.header.size += machine->id_hdr_size;
363 event->mmap2.pid = tgid;
364 event->mmap2.tid = pid;
365
366 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
367 rc = -1;
368 break;
369 }
370
371 if (truncation)
372 break;
373 }
374
375 fclose(fp);
376 return rc;
377}
378
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

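/*
 * Synthesize events for a single thread. With full == 0 only a COMM event
 * plus the MMAP events for the given pid are sent; with full != 0 each
 * task found under /proc/<pid>/task gets a FORK and a COMM event, and the
 * maps are synthesized once, for the task whose tid matches the passed
 * pid (normally the thread group leader).
 */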
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
					process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

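/*
 * Synthesize each thread in the map and, when a thread's group leader is
 * not itself in the map, synthesize the leader as well, so that its maps,
 * which the siblings share, are known too.
 */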
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

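/*
 * System-wide variant: scan every numeric entry under <root_dir>/proc and
 * synthesize each process found there, tolerating races with tasks that
 * exit while we walk the directory.
 */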
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as on PARISC64, where "_text"
	 * is an 'A' alias to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

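/*
 * Look up the start address of a symbol in a kallsyms-format file, where
 * each line looks roughly like:
 *
 *   ffffffff81000000 T _text
 *
 * Returns 0 when the symbol is not found (the address above is only an
 * example).
 */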
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

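/*
 * Synthesize the MMAP event for the kernel text map. The filename is the
 * machine's mmap name plus the reference relocation symbol, e.g. something
 * like "[kernel.kallsyms]_text" on the host (the exact name depends on the
 * machine and on which of _text/_stext was found).
 */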
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and once it is available keep this as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

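/*
 * Emit the thread map itself as a PERF_RECORD_THREAD_MAP event: a count
 * followed by one { pid, comm } entry per thread, where comm is a
 * fixed-size buffer (truncated if the name is longer), so the record size
 * is variable.
 */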
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* the bit position for cpu map->map[i] is map->map[i] + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

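/*
 * A rough worked example, assuming 64-bit longs: for the single cpu {127}
 * the array form needs just one u16 of payload while the mask form needs
 * BITS_TO_LONGS(128) * sizeof(long) = 16 bytes; for the dense map {0..255}
 * the array needs 256 * sizeof(u16) = 512 bytes while the mask needs only
 * 32, so the smaller encoding is picked per map.
 */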
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

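/*
 * The stat config is sent as event->nr { tag, val } u64 pairs, one per
 * PERF_STAT_CONFIG_TERM__* term; perf_event__read_stat_config() below is
 * the matching decoder.
 */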
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE, config->aggr_mode)
	ADD(INTERVAL, config->interval)
	ADD(SCALE, config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id = id;
	event.cpu = cpu;
	event.thread = thread;
	event.val = count->val;
	event.ena = count->ena;
	event.run = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

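/*
 * A stat round marks the end of one batch of PERF_RECORD_STAT events;
 * 'type' distinguishes an --interval tick from the final round.
 */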
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE, scale)
		CASE(INTERVAL, interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

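/*
 * Map a (cpumode, addr) pair to the right map group and annotate the
 * addr_location level accordingly:
 *
 *   'k' host kernel, '.' host user, 'g' guest kernel, 'u' guest user,
 *   'H' everything else (e.g. hypervisor samples), which gets no map.
 *
 * User addresses that fall outside all known maps but look like kernel
 * addresses are retried against the kernel maps, to catch vsyscall/vdso.
 */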
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

1382/*
1383 * The preprocess_sample method will return with reference counts for the
1384 * in it, when done using (and perhaps getting ref counts if needing to
1385 * keep a pointer to one of those entries) it must be paired with
1386 * addr_location__put(), so that the refcounts can be decremented.
1387 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

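/*
 * Intel BTS sessions show up as a hardware branch-instructions event with
 * sample_period == 1 (note that attr->config is checked with a bitwise
 * AND, so any config value sharing that bit also matches).
 */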
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

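/*
 * Resolve sample->addr (the data address, as opposed to sample->ip, which
 * machine__resolve() handles): try the function maps first, then fall
 * back to the variable maps.
 */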
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}