1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h>
3#include <errno.h>
4#include <fcntl.h>
5#include <inttypes.h>
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <sys/types.h>
9#include <sys/stat.h>
10#include <unistd.h>
11#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12#include <api/fs/fs.h>
13#include <linux/perf_event.h>
14#include "event.h"
15#include "debug.h"
16#include "hist.h"
17#include "machine.h"
18#include "sort.h"
19#include "string2.h"
20#include "strlist.h"
21#include "thread.h"
22#include "thread_map.h"
23#include "sane_ctype.h"
24#include "symbol/kallsyms.h"
25#include "asm/bug.h"
26#include "stat.h"
27
28static const char *perf_event__names[] = {
29 [0] = "TOTAL",
30 [PERF_RECORD_MMAP] = "MMAP",
31 [PERF_RECORD_MMAP2] = "MMAP2",
32 [PERF_RECORD_LOST] = "LOST",
33 [PERF_RECORD_COMM] = "COMM",
34 [PERF_RECORD_EXIT] = "EXIT",
35 [PERF_RECORD_THROTTLE] = "THROTTLE",
36 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
37 [PERF_RECORD_FORK] = "FORK",
38 [PERF_RECORD_READ] = "READ",
39 [PERF_RECORD_SAMPLE] = "SAMPLE",
40 [PERF_RECORD_AUX] = "AUX",
41 [PERF_RECORD_ITRACE_START] = "ITRACE_START",
42 [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
43 [PERF_RECORD_SWITCH] = "SWITCH",
44 [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
45 [PERF_RECORD_NAMESPACES] = "NAMESPACES",
46 [PERF_RECORD_HEADER_ATTR] = "ATTR",
47 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
48 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
49 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
50 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
51 [PERF_RECORD_ID_INDEX] = "ID_INDEX",
52 [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
53 [PERF_RECORD_AUXTRACE] = "AUXTRACE",
54 [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
55 [PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
56 [PERF_RECORD_CPU_MAP] = "CPU_MAP",
57 [PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
58 [PERF_RECORD_STAT] = "STAT",
59 [PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
60 [PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
61 [PERF_RECORD_TIME_CONV] = "TIME_CONV",
62 [PERF_RECORD_HEADER_FEATURE] = "FEATURE",
63};
64
65static const char *perf_ns__names[] = {
66 [NET_NS_INDEX] = "net",
67 [UTS_NS_INDEX] = "uts",
68 [IPC_NS_INDEX] = "ipc",
69 [PID_NS_INDEX] = "pid",
70 [USER_NS_INDEX] = "user",
71 [MNT_NS_INDEX] = "mnt",
72 [CGROUP_NS_INDEX] = "cgroup",
73};
74
75const char *perf_event__name(unsigned int id)
76{
77 if (id >= ARRAY_SIZE(perf_event__names))
78 return "INVALID";
79 if (!perf_event__names[id])
80 return "UNKNOWN";
81 return perf_event__names[id];
82}
83
84static const char *perf_ns__name(unsigned int id)
85{
86 if (id >= ARRAY_SIZE(perf_ns__names))
87 return "UNKNOWN";
88 return perf_ns__names[id];
89}
90
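/*
 * Feed a synthesized event to the tool's 'process' callback, attaching a
 * dummy sample: most fields are "unknown" (-1), the period is 1 and the
 * cpumode is taken from the misc bits of the event header.
 */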
91static int perf_tool__process_synth_event(struct perf_tool *tool,
92 union perf_event *event,
93 struct machine *machine,
94 perf_event__handler_t process)
95{
96 struct perf_sample synth_sample = {
97 .pid = -1,
98 .tid = -1,
99 .time = -1,
100 .stream_id = -1,
101 .cpu = -1,
102 .period = 1,
103 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
104 };
105
106 return process(tool, event, &synth_sample, machine);
107}
108
109/*
110 * Assumes that the first 4095 bytes of /proc/pid/status contain
111 * the comm, tgid and ppid.
112 */
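/*
 * The fields of interest in /proc/<pid>/status look like (values here are
 * illustrative only):
 *
 *	Name:	cat
 *	Tgid:	4242
 *	PPid:	1
 *
 * which is what the strstr()/atoi() parsing below relies on.
 */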
113static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
114 pid_t *tgid, pid_t *ppid)
115{
116 char filename[PATH_MAX];
117 char bf[4096];
118 int fd;
119 size_t size = 0;
120 ssize_t n;
121 char *name, *tgids, *ppids;
122
123 *tgid = -1;
124 *ppid = -1;
125
126 snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
127
128 fd = open(filename, O_RDONLY);
129 if (fd < 0) {
130 pr_debug("couldn't open %s\n", filename);
131 return -1;
132 }
133
134 n = read(fd, bf, sizeof(bf) - 1);
135 close(fd);
136 if (n <= 0) {
137 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
138 pid);
139 return -1;
140 }
141 bf[n] = '\0';
142
143 name = strstr(bf, "Name:");
144 tgids = strstr(bf, "Tgid:");
145 ppids = strstr(bf, "PPid:");
146
147 if (name) {
148 char *nl;
149
150 name += 5; /* strlen("Name:") */
151 name = ltrim(name);
152
153 nl = strchr(name, '\n');
154 if (nl)
155 *nl = '\0';
156
157 size = strlen(name);
158 if (size >= len)
159 size = len - 1;
160 memcpy(comm, name, size);
161 comm[size] = '\0';
162 } else {
163 pr_debug("Name: string not found for pid %d\n", pid);
164 }
165
166 if (tgids) {
167 tgids += 5; /* strlen("Tgid:") */
168 *tgid = atoi(tgids);
169 } else {
170 pr_debug("Tgid: string not found for pid %d\n", pid);
171 }
172
173 if (ppids) {
174 ppids += 5; /* strlen("PPid:") */
175 *ppid = atoi(ppids);
176 } else {
177 pr_debug("PPid: string not found for pid %d\n", pid);
178 }
179
180 return 0;
181}
182
183static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
184 struct machine *machine,
185 pid_t *tgid, pid_t *ppid)
186{
187 size_t size;
188
189 *ppid = -1;
190
191 memset(&event->comm, 0, sizeof(event->comm));
192
193 if (machine__is_host(machine)) {
194 if (perf_event__get_comm_ids(pid, event->comm.comm,
195 sizeof(event->comm.comm),
196 tgid, ppid) != 0) {
197 return -1;
198 }
199 } else {
200 *tgid = machine->pid;
201 }
202
203 if (*tgid < 0)
204 return -1;
205
206 event->comm.pid = *tgid;
207 event->comm.header.type = PERF_RECORD_COMM;
208
209 size = strlen(event->comm.comm) + 1;
210 size = PERF_ALIGN(size, sizeof(u64));
211 memset(event->comm.comm + size, 0, machine->id_hdr_size);
212 event->comm.header.size = (sizeof(event->comm) -
213 (sizeof(event->comm.comm) - size) +
214 machine->id_hdr_size);
215 event->comm.tid = pid;
216
217 return 0;
218}
219
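/*
 * Synthesize a PERF_RECORD_COMM event for 'pid' and hand it to 'process'.
 * Returns the thread group id on success, -1 on error.
 */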
220pid_t perf_event__synthesize_comm(struct perf_tool *tool,
221 union perf_event *event, pid_t pid,
222 perf_event__handler_t process,
223 struct machine *machine)
224{
225 pid_t tgid, ppid;
226
227 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
228 return -1;
229
230 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
231 return -1;
232
233 return tgid;
234}
235
236static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
237 struct perf_ns_link_info *ns_link_info)
238{
239 struct stat64 st;
240 char proc_ns[128];
241
242 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
243 if (stat64(proc_ns, &st) == 0) {
244 ns_link_info->dev = st.st_dev;
245 ns_link_info->ino = st.st_ino;
246 }
247}
248
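/*
 * Synthesize a PERF_RECORD_NAMESPACES event carrying one perf_ns_link_info
 * (device and inode of /proc/<pid>/ns/<name>) per namespace. A no-op unless
 * the tool asked for namespace events.
 */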
249int perf_event__synthesize_namespaces(struct perf_tool *tool,
250 union perf_event *event,
251 pid_t pid, pid_t tgid,
252 perf_event__handler_t process,
253 struct machine *machine)
254{
255 u32 idx;
256 struct perf_ns_link_info *ns_link_info;
257
258 if (!tool || !tool->namespace_events)
259 return 0;
260
261 memset(&event->namespaces, 0, (sizeof(event->namespaces) +
262 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
263 machine->id_hdr_size));
264
265 event->namespaces.pid = tgid;
266 event->namespaces.tid = pid;
267
268 event->namespaces.nr_namespaces = NR_NAMESPACES;
269
270 ns_link_info = event->namespaces.link_info;
271
272 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
273 perf_event__get_ns_link_info(pid, perf_ns__name(idx),
274 &ns_link_info[idx]);
275
276 event->namespaces.header.type = PERF_RECORD_NAMESPACES;
277
278 event->namespaces.header.size = (sizeof(event->namespaces) +
279 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
280 machine->id_hdr_size);
281
282 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
283 return -1;
284
285 return 0;
286}
287
288static int perf_event__synthesize_fork(struct perf_tool *tool,
289 union perf_event *event,
290 pid_t pid, pid_t tgid, pid_t ppid,
291 perf_event__handler_t process,
292 struct machine *machine)
293{
294 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
295
296 /*
297 * For the main thread, set the parent to the ppid from the status file.
298 * For other threads, set the parent pid to the main thread, i.e. assume
299 * the main thread spawns all threads in a process.
300 */
301 if (tgid == pid) {
302 event->fork.ppid = ppid;
303 event->fork.ptid = ppid;
304 } else {
305 event->fork.ppid = tgid;
306 event->fork.ptid = tgid;
307 }
308 event->fork.pid = tgid;
309 event->fork.tid = pid;
310 event->fork.header.type = PERF_RECORD_FORK;
311
312 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
313
314 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
315 return -1;
316
317 return 0;
318}
319
320int perf_event__synthesize_mmap_events(struct perf_tool *tool,
321 union perf_event *event,
322 pid_t pid, pid_t tgid,
323 perf_event__handler_t process,
324 struct machine *machine,
325 bool mmap_data,
326 unsigned int proc_map_timeout)
327{
328 char filename[PATH_MAX];
329 FILE *fp;
330 unsigned long long t;
331 bool truncation = false;
332 unsigned long long timeout = proc_map_timeout * 1000000ULL;
333 int rc = 0;
334 const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
335 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
336
337 if (machine__is_default_guest(machine))
338 return 0;
339
340 snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
341 machine->root_dir, pid, pid);
342
343 fp = fopen(filename, "r");
344 if (fp == NULL) {
345 /*
346 * We raced with a task exiting - just return:
347 */
348 pr_debug("couldn't open %s\n", filename);
349 return -1;
350 }
351
352 event->header.type = PERF_RECORD_MMAP2;
353 t = rdclock();
354
355 while (1) {
356 char bf[BUFSIZ];
357 char prot[5];
358 char execname[PATH_MAX];
359 char anonstr[] = "//anon";
360 unsigned int ino;
361 size_t size;
362 ssize_t n;
363
364 if (fgets(bf, sizeof(bf), fp) == NULL)
365 break;
366
367 if ((rdclock() - t) > timeout) {
368 pr_warning("Reading %s time out. "
369 "You may want to increase "
370 "the time limit by --proc-map-timeout\n",
371 filename);
372 truncation = true;
373 goto out;
374 }
375
376 /* ensure null termination since stack will be reused. */
377 strcpy(execname, "");
378
379 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
380 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
381 &event->mmap2.start, &event->mmap2.len, prot,
382 &event->mmap2.pgoff, &event->mmap2.maj,
383 &event->mmap2.min,
384 &ino, execname);
385
386 /*
387 * Anon maps don't have the execname.
388 */
389 if (n < 7)
390 continue;
391
392 event->mmap2.ino = (u64)ino;
393
394 /*
395 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
396 */
397 if (machine__is_host(machine))
398 event->header.misc = PERF_RECORD_MISC_USER;
399 else
400 event->header.misc = PERF_RECORD_MISC_GUEST_USER;
401
402 /* map protection and flags bits */
403 event->mmap2.prot = 0;
404 event->mmap2.flags = 0;
405 if (prot[0] == 'r')
406 event->mmap2.prot |= PROT_READ;
407 if (prot[1] == 'w')
408 event->mmap2.prot |= PROT_WRITE;
409 if (prot[2] == 'x')
410 event->mmap2.prot |= PROT_EXEC;
411
412 if (prot[3] == 's')
413 event->mmap2.flags |= MAP_SHARED;
414 else
415 event->mmap2.flags |= MAP_PRIVATE;
416
417 if (prot[2] != 'x') {
418 if (!mmap_data || prot[0] != 'r')
419 continue;
420
421 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
422 }
423
424out:
425 if (truncation)
426 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
427
428 if (!strcmp(execname, ""))
429 strcpy(execname, anonstr);
430
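		/* Maps backed by the hugetlbfs mountpoint are reported as anonymous, with MAP_HUGETLB set. */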
431 if (hugetlbfs_mnt_len &&
432 !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
433 strcpy(execname, anonstr);
434 event->mmap2.flags |= MAP_HUGETLB;
435 }
436
437 size = strlen(execname) + 1;
438 memcpy(event->mmap2.filename, execname, size);
439 size = PERF_ALIGN(size, sizeof(u64));
440 event->mmap2.len -= event->mmap2.start;
441 event->mmap2.header.size = (sizeof(event->mmap2) -
442 (sizeof(event->mmap2.filename) - size));
443 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
444 event->mmap2.header.size += machine->id_hdr_size;
445 event->mmap2.pid = tgid;
446 event->mmap2.tid = pid;
447
448 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
449 rc = -1;
450 break;
451 }
452
453 if (truncation)
454 break;
455 }
456
457 fclose(fp);
458 return rc;
459}
460
461int perf_event__synthesize_modules(struct perf_tool *tool,
462 perf_event__handler_t process,
463 struct machine *machine)
464{
465 int rc = 0;
466 struct map *pos;
467 struct map_groups *kmaps = &machine->kmaps;
468 struct maps *maps = &kmaps->maps[MAP__FUNCTION];
469 union perf_event *event = zalloc((sizeof(event->mmap) +
470 machine->id_hdr_size));
471 if (event == NULL) {
472 pr_debug("Not enough memory synthesizing mmap event "
473 "for kernel modules\n");
474 return -1;
475 }
476
477 event->header.type = PERF_RECORD_MMAP;
478
479 /*
480 * kernel uses 0 for user space maps, see kernel/perf_event.c
481 * __perf_event_mmap
482 */
483 if (machine__is_host(machine))
484 event->header.misc = PERF_RECORD_MISC_KERNEL;
485 else
486 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
487
488 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
489 size_t size;
490
491 if (__map__is_kernel(pos))
492 continue;
493
494 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
495 event->mmap.header.type = PERF_RECORD_MMAP;
496 event->mmap.header.size = (sizeof(event->mmap) -
497 (sizeof(event->mmap.filename) - size));
498 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
499 event->mmap.header.size += machine->id_hdr_size;
500 event->mmap.start = pos->start;
501 event->mmap.len = pos->end - pos->start;
502 event->mmap.pid = machine->pid;
503
504 memcpy(event->mmap.filename, pos->dso->long_name,
505 pos->dso->long_name_len + 1);
506 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
507 rc = -1;
508 break;
509 }
510 }
511
512 free(event);
513 return rc;
514}
515
516static int __event__synthesize_thread(union perf_event *comm_event,
517 union perf_event *mmap_event,
518 union perf_event *fork_event,
519 union perf_event *namespaces_event,
520 pid_t pid, int full,
521 perf_event__handler_t process,
522 struct perf_tool *tool,
523 struct machine *machine,
524 bool mmap_data,
525 unsigned int proc_map_timeout)
526{
527 char filename[PATH_MAX];
528 DIR *tasks;
529 struct dirent *dirent;
530 pid_t tgid, ppid;
531 int rc = 0;
532
533 /* special case: only send one comm event using passed in pid */
534 if (!full) {
535 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
536 process, machine);
537
538 if (tgid == -1)
539 return -1;
540
541 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
542 tgid, process, machine) < 0)
543 return -1;
544
545
546 return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
547 process, machine, mmap_data,
548 proc_map_timeout);
549 }
550
551 if (machine__is_default_guest(machine))
552 return 0;
553
554 snprintf(filename, sizeof(filename), "%s/proc/%d/task",
555 machine->root_dir, pid);
556
557 tasks = opendir(filename);
558 if (tasks == NULL) {
559 pr_debug("couldn't open %s\n", filename);
560 return 0;
561 }
562
563 while ((dirent = readdir(tasks)) != NULL) {
564 char *end;
565 pid_t _pid;
566
567 _pid = strtol(dirent->d_name, &end, 10);
568 if (*end)
569 continue;
570
571 rc = -1;
572 if (perf_event__prepare_comm(comm_event, _pid, machine,
573 &tgid, &ppid) != 0)
574 break;
575
576 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
577 ppid, process, machine) < 0)
578 break;
579
580 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
581 tgid, process, machine) < 0)
582 break;
583
584 /*
585 * Send the prepared comm event
586 */
587 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
588 break;
589
590 rc = 0;
591 if (_pid == pid) {
592 /* process the parent's maps too */
593 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
594 process, machine, mmap_data, proc_map_timeout);
595 if (rc)
596 break;
597 }
598 }
599
600 closedir(tasks);
601 return rc;
602}
603
604int perf_event__synthesize_thread_map(struct perf_tool *tool,
605 struct thread_map *threads,
606 perf_event__handler_t process,
607 struct machine *machine,
608 bool mmap_data,
609 unsigned int proc_map_timeout)
610{
611 union perf_event *comm_event, *mmap_event, *fork_event;
612 union perf_event *namespaces_event;
613 int err = -1, thread, j;
614
615 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
616 if (comm_event == NULL)
617 goto out;
618
619 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
620 if (mmap_event == NULL)
621 goto out_free_comm;
622
623 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
624 if (fork_event == NULL)
625 goto out_free_mmap;
626
627 namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
628 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
629 machine->id_hdr_size);
630 if (namespaces_event == NULL)
631 goto out_free_fork;
632
633 err = 0;
634 for (thread = 0; thread < threads->nr; ++thread) {
635 if (__event__synthesize_thread(comm_event, mmap_event,
636 fork_event, namespaces_event,
637 thread_map__pid(threads, thread), 0,
638 process, tool, machine,
639 mmap_data, proc_map_timeout)) {
640 err = -1;
641 break;
642 }
643
644 /*
645 * comm.pid is set to thread group id by
646 * perf_event__synthesize_comm
647 */
648 if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
649 bool need_leader = true;
650
651 /* is thread group leader in thread_map? */
652 for (j = 0; j < threads->nr; ++j) {
653 if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
654 need_leader = false;
655 break;
656 }
657 }
658
659 /* if not, generate events for it */
660 if (need_leader &&
661 __event__synthesize_thread(comm_event, mmap_event,
662 fork_event, namespaces_event,
663 comm_event->comm.pid, 0,
664 process, tool, machine,
665 mmap_data, proc_map_timeout)) {
666 err = -1;
667 break;
668 }
669 }
670 }
671 free(namespaces_event);
672out_free_fork:
673 free(fork_event);
674out_free_mmap:
675 free(mmap_event);
676out_free_comm:
677 free(comm_event);
678out:
679 return err;
680}
681
682static int __perf_event__synthesize_threads(struct perf_tool *tool,
683 perf_event__handler_t process,
684 struct machine *machine,
685 bool mmap_data,
686 unsigned int proc_map_timeout,
687 struct dirent **dirent,
688 int start,
689 int num)
690{
691 union perf_event *comm_event, *mmap_event, *fork_event;
692 union perf_event *namespaces_event;
693 int err = -1;
694 char *end;
695 pid_t pid;
696 int i;
697
698 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
699 if (comm_event == NULL)
700 goto out;
701
702 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
703 if (mmap_event == NULL)
704 goto out_free_comm;
705
706 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
707 if (fork_event == NULL)
708 goto out_free_mmap;
709
710 namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
711 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
712 machine->id_hdr_size);
713 if (namespaces_event == NULL)
714 goto out_free_fork;
715
716 for (i = start; i < start + num; i++) {
717 if (!isdigit(dirent[i]->d_name[0]))
718 continue;
719
720 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
721 /* only interested in proper numerical dirents */
722 if (*end)
723 continue;
724 /*
725 * We may race with exiting thread, so don't stop just because
726 * one thread couldn't be synthesized.
727 */
728 __event__synthesize_thread(comm_event, mmap_event, fork_event,
729 namespaces_event, pid, 1, process,
730 tool, machine, mmap_data,
731 proc_map_timeout);
732 }
733 err = 0;
734
735 free(namespaces_event);
736out_free_fork:
737 free(fork_event);
738out_free_mmap:
739 free(mmap_event);
740out_free_comm:
741 free(comm_event);
742out:
743 return err;
744}
745
746struct synthesize_threads_arg {
747 struct perf_tool *tool;
748 perf_event__handler_t process;
749 struct machine *machine;
750 bool mmap_data;
751 unsigned int proc_map_timeout;
752 struct dirent **dirent;
753 int num;
754 int start;
755};
756
757static void *synthesize_threads_worker(void *arg)
758{
759 struct synthesize_threads_arg *args = arg;
760
761 __perf_event__synthesize_threads(args->tool, args->process,
762 args->machine, args->mmap_data,
763 args->proc_map_timeout, args->dirent,
764 args->start, args->num);
765 return NULL;
766}
767
768int perf_event__synthesize_threads(struct perf_tool *tool,
769 perf_event__handler_t process,
770 struct machine *machine,
771 bool mmap_data,
772 unsigned int proc_map_timeout,
773 unsigned int nr_threads_synthesize)
774{
775 struct synthesize_threads_arg *args = NULL;
776 pthread_t *synthesize_threads = NULL;
777 char proc_path[PATH_MAX];
778 struct dirent **dirent;
779 int num_per_thread;
780 int m, n, i, j;
781 int thread_nr;
782 int base = 0;
783 int err = -1;
784
785
786 if (machine__is_default_guest(machine))
787 return 0;
788
789 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
790 n = scandir(proc_path, &dirent, 0, alphasort);
791 if (n < 0)
792 return err;
793
794 if (nr_threads_synthesize == UINT_MAX)
795 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
796 else
797 thread_nr = nr_threads_synthesize;
798
799 if (thread_nr <= 1) {
800 err = __perf_event__synthesize_threads(tool, process,
801 machine, mmap_data,
802 proc_map_timeout,
803 dirent, base, n);
804 goto free_dirent;
805 }
806 if (thread_nr > n)
807 thread_nr = n;
808
809 synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
810 if (synthesize_threads == NULL)
811 goto free_dirent;
812
813 args = calloc(thread_nr, sizeof(*args));
814 if (args == NULL)
815 goto free_threads;
816
817 num_per_thread = n / thread_nr;
818 m = n % thread_nr;
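	/*
	 * Split the /proc entries across the workers: the first 'm' workers
	 * get one extra entry each, e.g. (illustrative numbers) n = 10 entries
	 * and thread_nr = 4 gives chunks of 3, 3, 2 and 2.
	 */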
819 for (i = 0; i < thread_nr; i++) {
820 args[i].tool = tool;
821 args[i].process = process;
822 args[i].machine = machine;
823 args[i].mmap_data = mmap_data;
824 args[i].proc_map_timeout = proc_map_timeout;
825 args[i].dirent = dirent;
826 }
827 for (i = 0; i < m; i++) {
828 args[i].num = num_per_thread + 1;
829 args[i].start = i * args[i].num;
830 }
831 if (i != 0)
832 base = args[i-1].start + args[i-1].num;
833 for (j = i; j < thread_nr; j++) {
834 args[j].num = num_per_thread;
835 args[j].start = base + (j - i) * args[i].num;
836 }
837
838 for (i = 0; i < thread_nr; i++) {
839 if (pthread_create(&synthesize_threads[i], NULL,
840 synthesize_threads_worker, &args[i]))
841 goto out_join;
842 }
843 err = 0;
844out_join:
845 for (i = 0; i < thread_nr; i++)
846 pthread_join(synthesize_threads[i], NULL);
847 free(args);
848free_threads:
849 free(synthesize_threads);
850free_dirent:
851 for (i = 0; i < n; i++)
852 free(dirent[i]);
853 free(dirent);
854
855 return err;
856}
857
858struct process_symbol_args {
859 const char *name;
860 u64 start;
861};
862
863static int find_symbol_cb(void *arg, const char *name, char type,
864 u64 start)
865{
866 struct process_symbol_args *args = arg;
867
868 /*
869 * Must be a function or at least an alias, as in PARISC64, where "_text" is
870 * an 'A' (alias) at the same address as "_stext".
871 */
872 if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
873 type == 'A') || strcmp(name, args->name))
874 return 0;
875
876 args->start = start;
877 return 1;
878}
879
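/*
 * Look up 'symbol_name' in a kallsyms formatted file and return its start
 * address via 'addr'. Returns 0 on success, -1 if the symbol is not found
 * or the file cannot be parsed.
 */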
880int kallsyms__get_function_start(const char *kallsyms_filename,
881 const char *symbol_name, u64 *addr)
882{
883 struct process_symbol_args args = { .name = symbol_name, };
884
885 if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
886 return -1;
887
888 *addr = args.start;
889 return 0;
890}
891
892int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
893 perf_event__handler_t process,
894 struct machine *machine)
895{
896 size_t size;
897 struct map *map = machine__kernel_map(machine);
898 struct kmap *kmap;
899 int err;
900 union perf_event *event;
901
902 if (symbol_conf.kptr_restrict)
903 return -1;
904 if (map == NULL)
905 return -1;
906
907 /*
908 * We should get this from /sys/kernel/sections/.text, but until that is
909 * available use this, and once it is available keep this as a fallback for older
910 * kernels.
911 */
912 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
913 if (event == NULL) {
914 pr_debug("Not enough memory synthesizing mmap event "
915 "for kernel modules\n");
916 return -1;
917 }
918
919 if (machine__is_host(machine)) {
920 /*
921 * kernel uses PERF_RECORD_MISC_USER for user space maps,
922 * see kernel/perf_event.c __perf_event_mmap
923 */
924 event->header.misc = PERF_RECORD_MISC_KERNEL;
925 } else {
926 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
927 }
928
929 kmap = map__kmap(map);
930 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
931 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
932 size = PERF_ALIGN(size, sizeof(u64));
933 event->mmap.header.type = PERF_RECORD_MMAP;
934 event->mmap.header.size = (sizeof(event->mmap) -
935 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
936 event->mmap.pgoff = kmap->ref_reloc_sym->addr;
937 event->mmap.start = map->start;
938 event->mmap.len = map->end - event->mmap.start;
939 event->mmap.pid = machine->pid;
940
941 err = perf_tool__process_synth_event(tool, event, machine, process);
942 free(event);
943
944 return err;
945}
946
947int perf_event__synthesize_thread_map2(struct perf_tool *tool,
948 struct thread_map *threads,
949 perf_event__handler_t process,
950 struct machine *machine)
951{
952 union perf_event *event;
953 int i, err, size;
954
955 size = sizeof(event->thread_map);
956 size += threads->nr * sizeof(event->thread_map.entries[0]);
957
958 event = zalloc(size);
959 if (!event)
960 return -ENOMEM;
961
962 event->header.type = PERF_RECORD_THREAD_MAP;
963 event->header.size = size;
964 event->thread_map.nr = threads->nr;
965
966 for (i = 0; i < threads->nr; i++) {
967 struct thread_map_event_entry *entry = &event->thread_map.entries[i];
968 char *comm = thread_map__comm(threads, i);
969
970 if (!comm)
971 comm = (char *) "";
972
973 entry->pid = thread_map__pid(threads, i);
974 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
975 }
976
977 err = process(tool, event, NULL, machine);
978
979 free(event);
980 return err;
981}
982
983static void synthesize_cpus(struct cpu_map_entries *cpus,
984 struct cpu_map *map)
985{
986 int i;
987
988 cpus->nr = map->nr;
989
990 for (i = 0; i < map->nr; i++)
991 cpus->cpu[i] = map->map[i];
992}
993
994static void synthesize_mask(struct cpu_map_mask *mask,
995 struct cpu_map *map, int max)
996{
997 int i;
998
999 mask->nr = BITS_TO_LONGS(max);
1000 mask->long_size = sizeof(long);
1001
1002 for (i = 0; i < map->nr; i++)
1003 set_bit(map->map[i], mask->mask);
1004}
1005
1006static size_t cpus_size(struct cpu_map *map)
1007{
1008 return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1009}
1010
1011static size_t mask_size(struct cpu_map *map, int *max)
1012{
1013 int i;
1014
1015 *max = 0;
1016
1017 for (i = 0; i < map->nr; i++) {
1018 /* bit position of the cpu is its value + 1 */
1019 int bit = map->map[i] + 1;
1020
1021 if (bit > *max)
1022 *max = bit;
1023 }
1024
1025 return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
1026}
1027
1028void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
1029{
1030 size_t size_cpus, size_mask;
1031 bool is_dummy = cpu_map__empty(map);
1032
1033 /*
1034 * Both array and mask data have variable size based
1035 * on the number of cpus and their actual values.
1036 * The size of the 'struct cpu_map_data' is:
1037 *
1038 * array = size of 'struct cpu_map_entries' +
1039 * number of cpus * sizeof(u16)
1040 *
1041 * mask = size of 'struct cpu_map_mask' +
1042 * maximum cpu bit converted to size of longs
1043 *
1044 * and finally + the size of 'struct cpu_map_data'.
1045 */
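	/*
	 * Illustrative example: for a map of 4 cpus {0,1,2,3} with 64-bit longs,
	 *
	 *	size_cpus = sizeof(struct cpu_map_entries) + 4 * sizeof(u16)
	 *	size_mask = sizeof(struct cpu_map_mask)    + 1 * sizeof(long)
	 *
	 * The array form is used when it is strictly smaller (or the map is a
	 * dummy/empty one), otherwise the mask form is used.
	 */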
1046 size_cpus = cpus_size(map);
1047 size_mask = mask_size(map, max);
1048
1049 if (is_dummy || (size_cpus < size_mask)) {
1050 *size += size_cpus;
1051 *type = PERF_CPU_MAP__CPUS;
1052 } else {
1053 *size += size_mask;
1054 *type = PERF_CPU_MAP__MASK;
1055 }
1056
1057 *size += sizeof(struct cpu_map_data);
1058 return zalloc(*size);
1059}
1060
1061void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
1062 u16 type, int max)
1063{
1064 data->type = type;
1065
1066 switch (type) {
1067 case PERF_CPU_MAP__CPUS:
1068 synthesize_cpus((struct cpu_map_entries *) data->data, map);
1069 break;
1070 case PERF_CPU_MAP__MASK:
1071 synthesize_mask((struct cpu_map_mask *) data->data, map, max);
1072 default:
1073 break;
1074 }
1075}
1076
1077static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
1078{
1079 size_t size = sizeof(struct cpu_map_event);
1080 struct cpu_map_event *event;
1081 int max;
1082 u16 type;
1083
1084 event = cpu_map_data__alloc(map, &size, &type, &max);
1085 if (!event)
1086 return NULL;
1087
1088 event->header.type = PERF_RECORD_CPU_MAP;
1089 event->header.size = size;
1090 event->data.type = type;
1091
1092 cpu_map_data__synthesize(&event->data, map, type, max);
1093 return event;
1094}
1095
1096int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1097 struct cpu_map *map,
1098 perf_event__handler_t process,
1099 struct machine *machine)
1100{
1101 struct cpu_map_event *event;
1102 int err;
1103
1104 event = cpu_map_event__new(map);
1105 if (!event)
1106 return -ENOMEM;
1107
1108 err = process(tool, (union perf_event *) event, NULL, machine);
1109
1110 free(event);
1111 return err;
1112}
1113
1114int perf_event__synthesize_stat_config(struct perf_tool *tool,
1115 struct perf_stat_config *config,
1116 perf_event__handler_t process,
1117 struct machine *machine)
1118{
1119 struct stat_config_event *event;
1120 int size, i = 0, err;
1121
1122 size = sizeof(*event);
1123 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1124
1125 event = zalloc(size);
1126 if (!event)
1127 return -ENOMEM;
1128
1129 event->header.type = PERF_RECORD_STAT_CONFIG;
1130 event->header.size = size;
1131 event->nr = PERF_STAT_CONFIG_TERM__MAX;
1132
1133#define ADD(__term, __val) \
1134 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
1135 event->data[i].val = __val; \
1136 i++;
1137
1138 ADD(AGGR_MODE, config->aggr_mode)
1139 ADD(INTERVAL, config->interval)
1140 ADD(SCALE, config->scale)
1141
1142 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1143 "stat config terms unbalanced\n");
1144#undef ADD
1145
1146 err = process(tool, (union perf_event *) event, NULL, machine);
1147
1148 free(event);
1149 return err;
1150}
1151
1152int perf_event__synthesize_stat(struct perf_tool *tool,
1153 u32 cpu, u32 thread, u64 id,
1154 struct perf_counts_values *count,
1155 perf_event__handler_t process,
1156 struct machine *machine)
1157{
1158 struct stat_event event;
1159
1160 event.header.type = PERF_RECORD_STAT;
1161 event.header.size = sizeof(event);
1162 event.header.misc = 0;
1163
1164 event.id = id;
1165 event.cpu = cpu;
1166 event.thread = thread;
1167 event.val = count->val;
1168 event.ena = count->ena;
1169 event.run = count->run;
1170
1171 return process(tool, (union perf_event *) &event, NULL, machine);
1172}
1173
1174int perf_event__synthesize_stat_round(struct perf_tool *tool,
1175 u64 evtime, u64 type,
1176 perf_event__handler_t process,
1177 struct machine *machine)
1178{
1179 struct stat_round_event event;
1180
1181 event.header.type = PERF_RECORD_STAT_ROUND;
1182 event.header.size = sizeof(event);
1183 event.header.misc = 0;
1184
1185 event.time = evtime;
1186 event.type = type;
1187
1188 return process(tool, (union perf_event *) &event, NULL, machine);
1189}
1190
1191void perf_event__read_stat_config(struct perf_stat_config *config,
1192 struct stat_config_event *event)
1193{
1194 unsigned i;
1195
1196 for (i = 0; i < event->nr; i++) {
1197
1198 switch (event->data[i].tag) {
1199#define CASE(__term, __val) \
1200 case PERF_STAT_CONFIG_TERM__##__term: \
1201 config->__val = event->data[i].val; \
1202 break;
1203
1204 CASE(AGGR_MODE, aggr_mode)
1205 CASE(SCALE, scale)
1206 CASE(INTERVAL, interval)
1207#undef CASE
1208 default:
1209 pr_warning("unknown stat config term %" PRIu64 "\n",
1210 event->data[i].tag);
1211 }
1212 }
1213}
1214
1215size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1216{
1217 const char *s;
1218
1219 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1220 s = " exec";
1221 else
1222 s = "";
1223
1224 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1225}
1226
1227size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1228{
1229 size_t ret = 0;
1230 struct perf_ns_link_info *ns_link_info;
1231 u32 nr_namespaces, idx;
1232
1233 ns_link_info = event->namespaces.link_info;
1234 nr_namespaces = event->namespaces.nr_namespaces;
1235
1236 ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1237 event->namespaces.pid,
1238 event->namespaces.tid,
1239 nr_namespaces);
1240
1241 for (idx = 0; idx < nr_namespaces; idx++) {
1242 if (idx && (idx % 4 == 0))
1243 ret += fprintf(fp, "\n\t\t ");
1244
1245 ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1246 perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1247 (u64)ns_link_info[idx].ino,
1248 ((idx + 1) != nr_namespaces) ? ", " : "]\n");
1249 }
1250
1251 return ret;
1252}
1253
1254int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1255 union perf_event *event,
1256 struct perf_sample *sample,
1257 struct machine *machine)
1258{
1259 return machine__process_comm_event(machine, event, sample);
1260}
1261
1262int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
1263 union perf_event *event,
1264 struct perf_sample *sample,
1265 struct machine *machine)
1266{
1267 return machine__process_namespaces_event(machine, event, sample);
1268}
1269
1270int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1271 union perf_event *event,
1272 struct perf_sample *sample,
1273 struct machine *machine)
1274{
1275 return machine__process_lost_event(machine, event, sample);
1276}
1277
1278int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1279 union perf_event *event,
1280 struct perf_sample *sample __maybe_unused,
1281 struct machine *machine)
1282{
1283 return machine__process_aux_event(machine, event);
1284}
1285
1286int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1287 union perf_event *event,
1288 struct perf_sample *sample __maybe_unused,
1289 struct machine *machine)
1290{
1291 return machine__process_itrace_start_event(machine, event);
1292}
1293
1294int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1295 union perf_event *event,
1296 struct perf_sample *sample,
1297 struct machine *machine)
1298{
1299 return machine__process_lost_samples_event(machine, event, sample);
1300}
1301
1302int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1303 union perf_event *event,
1304 struct perf_sample *sample __maybe_unused,
1305 struct machine *machine)
1306{
1307 return machine__process_switch_event(machine, event);
1308}
1309
1310size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1311{
1312 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1313 event->mmap.pid, event->mmap.tid, event->mmap.start,
1314 event->mmap.len, event->mmap.pgoff,
1315 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1316 event->mmap.filename);
1317}
1318
1319size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1320{
1321 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1322 " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1323 event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1324 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1325 event->mmap2.min, event->mmap2.ino,
1326 event->mmap2.ino_generation,
1327 (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1328 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1329 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1330 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1331 event->mmap2.filename);
1332}
1333
1334size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1335{
1336 struct thread_map *threads = thread_map__new_event(&event->thread_map);
1337 size_t ret;
1338
1339 ret = fprintf(fp, " nr: ");
1340
1341 if (threads)
1342 ret += thread_map__fprintf(threads, fp);
1343 else
1344 ret += fprintf(fp, "failed to get threads from event\n");
1345
1346 thread_map__put(threads);
1347 return ret;
1348}
1349
1350size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1351{
1352 struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1353 size_t ret;
1354
1355 ret = fprintf(fp, ": ");
1356
1357 if (cpus)
1358 ret += cpu_map__fprintf(cpus, fp);
1359 else
1360 ret += fprintf(fp, "failed to get cpumap from event\n");
1361
1362 cpu_map__put(cpus);
1363 return ret;
1364}
1365
1366int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1367 union perf_event *event,
1368 struct perf_sample *sample,
1369 struct machine *machine)
1370{
1371 return machine__process_mmap_event(machine, event, sample);
1372}
1373
1374int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1375 union perf_event *event,
1376 struct perf_sample *sample,
1377 struct machine *machine)
1378{
1379 return machine__process_mmap2_event(machine, event, sample);
1380}
1381
1382size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1383{
1384 return fprintf(fp, "(%d:%d):(%d:%d)\n",
1385 event->fork.pid, event->fork.tid,
1386 event->fork.ppid, event->fork.ptid);
1387}
1388
1389int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1390 union perf_event *event,
1391 struct perf_sample *sample,
1392 struct machine *machine)
1393{
1394 return machine__process_fork_event(machine, event, sample);
1395}
1396
1397int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1398 union perf_event *event,
1399 struct perf_sample *sample,
1400 struct machine *machine)
1401{
1402 return machine__process_exit_event(machine, event, sample);
1403}
1404
1405size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1406{
1407 return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1408 event->aux.aux_offset, event->aux.aux_size,
1409 event->aux.flags,
1410 event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1411 event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
1412 event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "");
1413}
1414
1415size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1416{
1417 return fprintf(fp, " pid: %u tid: %u\n",
1418 event->itrace_start.pid, event->itrace_start.tid);
1419}
1420
1421size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1422{
1423 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1424 const char *in_out = !out ? "IN " :
1425 !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1426 "OUT " : "OUT preempt";
1427
1428 if (event->header.type == PERF_RECORD_SWITCH)
1429 return fprintf(fp, " %s\n", in_out);
1430
1431 return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
1432 in_out, out ? "next" : "prev",
1433 event->context_switch.next_prev_pid,
1434 event->context_switch.next_prev_tid);
1435}
1436
1437static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
1438{
1439 return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
1440}
1441
1442size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1443{
1444 size_t ret = fprintf(fp, "PERF_RECORD_%s",
1445 perf_event__name(event->header.type));
1446
1447 switch (event->header.type) {
1448 case PERF_RECORD_COMM:
1449 ret += perf_event__fprintf_comm(event, fp);
1450 break;
1451 case PERF_RECORD_FORK:
1452 case PERF_RECORD_EXIT:
1453 ret += perf_event__fprintf_task(event, fp);
1454 break;
1455 case PERF_RECORD_MMAP:
1456 ret += perf_event__fprintf_mmap(event, fp);
1457 break;
1458 case PERF_RECORD_NAMESPACES:
1459 ret += perf_event__fprintf_namespaces(event, fp);
1460 break;
1461 case PERF_RECORD_MMAP2:
1462 ret += perf_event__fprintf_mmap2(event, fp);
1463 break;
1464 case PERF_RECORD_AUX:
1465 ret += perf_event__fprintf_aux(event, fp);
1466 break;
1467 case PERF_RECORD_ITRACE_START:
1468 ret += perf_event__fprintf_itrace_start(event, fp);
1469 break;
1470 case PERF_RECORD_SWITCH:
1471 case PERF_RECORD_SWITCH_CPU_WIDE:
1472 ret += perf_event__fprintf_switch(event, fp);
1473 break;
1474 case PERF_RECORD_LOST:
1475 ret += perf_event__fprintf_lost(event, fp);
1476 break;
1477 default:
1478 ret += fprintf(fp, "\n");
1479 }
1480
1481 return ret;
1482}
1483
1484int perf_event__process(struct perf_tool *tool __maybe_unused,
1485 union perf_event *event,
1486 struct perf_sample *sample,
1487 struct machine *machine)
1488{
1489 return machine__process_event(machine, event, sample);
1490}
1491
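/*
 * Resolve 'addr' within the thread's address space into a map in 'al',
 * picking the map groups according to cpumode: 'k' host kernel, '.' host
 * user, 'g' guest kernel, 'u' guest user, 'H' for everything else.
 */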
1492void thread__find_addr_map(struct thread *thread, u8 cpumode,
1493 enum map_type type, u64 addr,
1494 struct addr_location *al)
1495{
1496 struct map_groups *mg = thread->mg;
1497 struct machine *machine = mg->machine;
1498 bool load_map = false;
1499
1500 al->machine = machine;
1501 al->thread = thread;
1502 al->addr = addr;
1503 al->cpumode = cpumode;
1504 al->filtered = 0;
1505
1506 if (machine == NULL) {
1507 al->map = NULL;
1508 return;
1509 }
1510
1511 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1512 al->level = 'k';
1513 mg = &machine->kmaps;
1514 load_map = true;
1515 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1516 al->level = '.';
1517 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1518 al->level = 'g';
1519 mg = &machine->kmaps;
1520 load_map = true;
1521 } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1522 al->level = 'u';
1523 } else {
1524 al->level = 'H';
1525 al->map = NULL;
1526
1527 if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1528 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1529 !perf_guest)
1530 al->filtered |= (1 << HIST_FILTER__GUEST);
1531 if ((cpumode == PERF_RECORD_MISC_USER ||
1532 cpumode == PERF_RECORD_MISC_KERNEL) &&
1533 !perf_host)
1534 al->filtered |= (1 << HIST_FILTER__HOST);
1535
1536 return;
1537 }
1538try_again:
1539 al->map = map_groups__find(mg, type, al->addr);
1540 if (al->map == NULL) {
1541 /*
1542 * If this is outside of all known maps, and is a negative
1543 * address, try to look it up in the kernel dso, as it might be
1544 * a vsyscall or vdso (which executes in user-mode).
1545 *
1546 * XXX This is nasty, we should have a symbol list in the
1547 * "[vdso]" dso, but for now lets use the old trick of looking
1548 * in the whole kernel symbol list.
1549 */
1550 if (cpumode == PERF_RECORD_MISC_USER && machine &&
1551 mg != &machine->kmaps &&
1552 machine__kernel_ip(machine, al->addr)) {
1553 mg = &machine->kmaps;
1554 load_map = true;
1555 goto try_again;
1556 }
1557 } else {
1558 /*
1559 * Kernel maps might be changed when loading symbols so loading
1560 * must be done prior to using kernel maps.
1561 */
1562 if (load_map)
1563 map__load(al->map);
1564 al->addr = al->map->map_ip(al->map, al->addr);
1565 }
1566}
1567
1568void thread__find_addr_location(struct thread *thread,
1569 u8 cpumode, enum map_type type, u64 addr,
1570 struct addr_location *al)
1571{
1572 thread__find_addr_map(thread, cpumode, type, addr, al);
1573 if (al->map != NULL)
1574 al->sym = map__find_symbol(al->map, al->addr);
1575 else
1576 al->sym = NULL;
1577}
1578
1579/*
1580 * Callers need to drop the reference to al->thread, obtained in
1581 * machine__findnew_thread()
1582 */
1583int machine__resolve(struct machine *machine, struct addr_location *al,
1584 struct perf_sample *sample)
1585{
1586 struct thread *thread = machine__findnew_thread(machine, sample->pid,
1587 sample->tid);
1588
1589 if (thread == NULL)
1590 return -1;
1591
1592 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1593 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
1594 dump_printf(" ...... dso: %s\n",
1595 al->map ? al->map->dso->long_name :
1596 al->level == 'H' ? "[hypervisor]" : "<not found>");
1597
1598 if (thread__is_filtered(thread))
1599 al->filtered |= (1 << HIST_FILTER__THREAD);
1600
1601 al->sym = NULL;
1602 al->cpu = sample->cpu;
1603 al->socket = -1;
1604 al->srcline = NULL;
1605
1606 if (al->cpu >= 0) {
1607 struct perf_env *env = machine->env;
1608
1609 if (env && env->cpu)
1610 al->socket = env->cpu[al->cpu].socket_id;
1611 }
1612
1613 if (al->map) {
1614 struct dso *dso = al->map->dso;
1615
1616 if (symbol_conf.dso_list &&
1617 (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1618 dso->short_name) ||
1619 (dso->short_name != dso->long_name &&
1620 strlist__has_entry(symbol_conf.dso_list,
1621 dso->long_name))))) {
1622 al->filtered |= (1 << HIST_FILTER__DSO);
1623 }
1624
1625 al->sym = map__find_symbol(al->map, al->addr);
1626 }
1627
1628 if (symbol_conf.sym_list &&
1629 (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1630 al->sym->name))) {
1631 al->filtered |= (1 << HIST_FILTER__SYMBOL);
1632 }
1633
1634 return 0;
1635}
1636
1637/*
1638 * The preprocess_sample method will return with reference counts for the
1639 * entries in it; when done using them (and perhaps getting ref counts if needing to
1640 * keep a pointer to one of those entries) it must be paired with
1641 * addr_location__put(), so that the refcounts can be decremented.
1642 */
1643void addr_location__put(struct addr_location *al)
1644{
1645 thread__zput(al->thread);
1646}
1647
1648bool is_bts_event(struct perf_event_attr *attr)
1649{
1650 return attr->type == PERF_TYPE_HARDWARE &&
1651 (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1652 attr->sample_period == 1;
1653}
1654
1655bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1656{
1657 if (attr->type == PERF_TYPE_SOFTWARE &&
1658 (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1659 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1660 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1661 return true;
1662
1663 if (is_bts_event(attr))
1664 return true;
1665
1666 return false;
1667}
1668
1669void thread__resolve(struct thread *thread, struct addr_location *al,
1670 struct perf_sample *sample)
1671{
1672 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1673 if (!al->map)
1674 thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1675 sample->addr, al);
1676
1677 al->cpu = sample->cpu;
1678 al->sym = NULL;
1679
1680 if (al->map)
1681 al->sym = map__find_symbol(al->map, al->addr);
1682}
1#include <linux/types.h>
2#include <sys/mman.h>
3#include "event.h"
4#include "debug.h"
5#include "hist.h"
6#include "machine.h"
7#include "sort.h"
8#include "string.h"
9#include "strlist.h"
10#include "thread.h"
11#include "thread_map.h"
12#include "symbol/kallsyms.h"
13#include "asm/bug.h"
14#include "stat.h"
15
16static const char *perf_event__names[] = {
17 [0] = "TOTAL",
18 [PERF_RECORD_MMAP] = "MMAP",
19 [PERF_RECORD_MMAP2] = "MMAP2",
20 [PERF_RECORD_LOST] = "LOST",
21 [PERF_RECORD_COMM] = "COMM",
22 [PERF_RECORD_EXIT] = "EXIT",
23 [PERF_RECORD_THROTTLE] = "THROTTLE",
24 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
25 [PERF_RECORD_FORK] = "FORK",
26 [PERF_RECORD_READ] = "READ",
27 [PERF_RECORD_SAMPLE] = "SAMPLE",
28 [PERF_RECORD_AUX] = "AUX",
29 [PERF_RECORD_ITRACE_START] = "ITRACE_START",
30 [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
31 [PERF_RECORD_SWITCH] = "SWITCH",
32 [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
33 [PERF_RECORD_HEADER_ATTR] = "ATTR",
34 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
35 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
36 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
37 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
38 [PERF_RECORD_ID_INDEX] = "ID_INDEX",
39 [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
40 [PERF_RECORD_AUXTRACE] = "AUXTRACE",
41 [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
42 [PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
43 [PERF_RECORD_CPU_MAP] = "CPU_MAP",
44 [PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
45 [PERF_RECORD_STAT] = "STAT",
46 [PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
47 [PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
48};
49
50const char *perf_event__name(unsigned int id)
51{
52 if (id >= ARRAY_SIZE(perf_event__names))
53 return "INVALID";
54 if (!perf_event__names[id])
55 return "UNKNOWN";
56 return perf_event__names[id];
57}
58
59static int perf_tool__process_synth_event(struct perf_tool *tool,
60 union perf_event *event,
61 struct machine *machine,
62 perf_event__handler_t process)
63{
64 struct perf_sample synth_sample = {
65 .pid = -1,
66 .tid = -1,
67 .time = -1,
68 .stream_id = -1,
69 .cpu = -1,
70 .period = 1,
71 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
72 };
73
74 return process(tool, event, &synth_sample, machine);
75};
76
77/*
78 * Assumes that the first 4095 bytes of /proc/pid/stat contains
79 * the comm, tgid and ppid.
80 */
81static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
82 pid_t *tgid, pid_t *ppid)
83{
84 char filename[PATH_MAX];
85 char bf[4096];
86 int fd;
87 size_t size = 0;
88 ssize_t n;
89 char *nl, *name, *tgids, *ppids;
90
91 *tgid = -1;
92 *ppid = -1;
93
94 snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
95
96 fd = open(filename, O_RDONLY);
97 if (fd < 0) {
98 pr_debug("couldn't open %s\n", filename);
99 return -1;
100 }
101
102 n = read(fd, bf, sizeof(bf) - 1);
103 close(fd);
104 if (n <= 0) {
105 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
106 pid);
107 return -1;
108 }
109 bf[n] = '\0';
110
111 name = strstr(bf, "Name:");
112 tgids = strstr(bf, "Tgid:");
113 ppids = strstr(bf, "PPid:");
114
115 if (name) {
116 name += 5; /* strlen("Name:") */
117
118 while (*name && isspace(*name))
119 ++name;
120
121 nl = strchr(name, '\n');
122 if (nl)
123 *nl = '\0';
124
125 size = strlen(name);
126 if (size >= len)
127 size = len - 1;
128 memcpy(comm, name, size);
129 comm[size] = '\0';
130 } else {
131 pr_debug("Name: string not found for pid %d\n", pid);
132 }
133
134 if (tgids) {
135 tgids += 5; /* strlen("Tgid:") */
136 *tgid = atoi(tgids);
137 } else {
138 pr_debug("Tgid: string not found for pid %d\n", pid);
139 }
140
141 if (ppids) {
142 ppids += 5; /* strlen("PPid:") */
143 *ppid = atoi(ppids);
144 } else {
145 pr_debug("PPid: string not found for pid %d\n", pid);
146 }
147
148 return 0;
149}
150
151static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
152 struct machine *machine,
153 pid_t *tgid, pid_t *ppid)
154{
155 size_t size;
156
157 *ppid = -1;
158
159 memset(&event->comm, 0, sizeof(event->comm));
160
161 if (machine__is_host(machine)) {
162 if (perf_event__get_comm_ids(pid, event->comm.comm,
163 sizeof(event->comm.comm),
164 tgid, ppid) != 0) {
165 return -1;
166 }
167 } else {
168 *tgid = machine->pid;
169 }
170
171 if (*tgid < 0)
172 return -1;
173
174 event->comm.pid = *tgid;
175 event->comm.header.type = PERF_RECORD_COMM;
176
177 size = strlen(event->comm.comm) + 1;
178 size = PERF_ALIGN(size, sizeof(u64));
179 memset(event->comm.comm + size, 0, machine->id_hdr_size);
180 event->comm.header.size = (sizeof(event->comm) -
181 (sizeof(event->comm.comm) - size) +
182 machine->id_hdr_size);
183 event->comm.tid = pid;
184
185 return 0;
186}
187
188pid_t perf_event__synthesize_comm(struct perf_tool *tool,
189 union perf_event *event, pid_t pid,
190 perf_event__handler_t process,
191 struct machine *machine)
192{
193 pid_t tgid, ppid;
194
195 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
196 return -1;
197
198 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
199 return -1;
200
201 return tgid;
202}
203
204static int perf_event__synthesize_fork(struct perf_tool *tool,
205 union perf_event *event,
206 pid_t pid, pid_t tgid, pid_t ppid,
207 perf_event__handler_t process,
208 struct machine *machine)
209{
210 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
211
212 /*
213 * for main thread set parent to ppid from status file. For other
214 * threads set parent pid to main thread. ie., assume main thread
215 * spawns all threads in a process
216 */
217 if (tgid == pid) {
218 event->fork.ppid = ppid;
219 event->fork.ptid = ppid;
220 } else {
221 event->fork.ppid = tgid;
222 event->fork.ptid = tgid;
223 }
224 event->fork.pid = tgid;
225 event->fork.tid = pid;
226 event->fork.header.type = PERF_RECORD_FORK;
227
228 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
229
230 if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
231 return -1;
232
233 return 0;
234}
235
236int perf_event__synthesize_mmap_events(struct perf_tool *tool,
237 union perf_event *event,
238 pid_t pid, pid_t tgid,
239 perf_event__handler_t process,
240 struct machine *machine,
241 bool mmap_data,
242 unsigned int proc_map_timeout)
243{
244 char filename[PATH_MAX];
245 FILE *fp;
246 unsigned long long t;
247 bool truncation = false;
248 unsigned long long timeout = proc_map_timeout * 1000000ULL;
249 int rc = 0;
250
251 if (machine__is_default_guest(machine))
252 return 0;
253
254 snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
255 machine->root_dir, pid);
256
257 fp = fopen(filename, "r");
258 if (fp == NULL) {
259 /*
260 * We raced with a task exiting - just return:
261 */
262 pr_debug("couldn't open %s\n", filename);
263 return -1;
264 }
265
266 event->header.type = PERF_RECORD_MMAP2;
267 t = rdclock();
268
269 while (1) {
270 char bf[BUFSIZ];
271 char prot[5];
272 char execname[PATH_MAX];
273 char anonstr[] = "//anon";
274 unsigned int ino;
275 size_t size;
276 ssize_t n;
277
278 if (fgets(bf, sizeof(bf), fp) == NULL)
279 break;
280
281 if ((rdclock() - t) > timeout) {
282 pr_warning("Reading %s time out. "
283 "You may want to increase "
284 "the time limit by --proc-map-timeout\n",
285 filename);
286 truncation = true;
287 goto out;
288 }
289
290 /* ensure null termination since stack will be reused. */
291 strcpy(execname, "");
292
293 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
294 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
295 &event->mmap2.start, &event->mmap2.len, prot,
296 &event->mmap2.pgoff, &event->mmap2.maj,
297 &event->mmap2.min,
298 &ino, execname);
299
300 /*
301 * Anon maps don't have the execname.
302 */
303 if (n < 7)
304 continue;
305
306 event->mmap2.ino = (u64)ino;
307
308 /*
309 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
310 */
311 if (machine__is_host(machine))
312 event->header.misc = PERF_RECORD_MISC_USER;
313 else
314 event->header.misc = PERF_RECORD_MISC_GUEST_USER;
315
316 /* map protection and flags bits */
317 event->mmap2.prot = 0;
318 event->mmap2.flags = 0;
319 if (prot[0] == 'r')
320 event->mmap2.prot |= PROT_READ;
321 if (prot[1] == 'w')
322 event->mmap2.prot |= PROT_WRITE;
323 if (prot[2] == 'x')
324 event->mmap2.prot |= PROT_EXEC;
325
326 if (prot[3] == 's')
327 event->mmap2.flags |= MAP_SHARED;
328 else
329 event->mmap2.flags |= MAP_PRIVATE;
330
331 if (prot[2] != 'x') {
332 if (!mmap_data || prot[0] != 'r')
333 continue;
334
335 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
336 }
337
338out:
339 if (truncation)
340 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
341
342 if (!strcmp(execname, ""))
343 strcpy(execname, anonstr);
344
345 size = strlen(execname) + 1;
346 memcpy(event->mmap2.filename, execname, size);
347 size = PERF_ALIGN(size, sizeof(u64));
348 event->mmap2.len -= event->mmap.start;
349 event->mmap2.header.size = (sizeof(event->mmap2) -
350 (sizeof(event->mmap2.filename) - size));
351 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
352 event->mmap2.header.size += machine->id_hdr_size;
353 event->mmap2.pid = tgid;
354 event->mmap2.tid = pid;
355
356 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
357 rc = -1;
358 break;
359 }
360
361 if (truncation)
362 break;
363 }
364
365 fclose(fp);
366 return rc;
367}
368
369int perf_event__synthesize_modules(struct perf_tool *tool,
370 perf_event__handler_t process,
371 struct machine *machine)
372{
373 int rc = 0;
374 struct map *pos;
375 struct map_groups *kmaps = &machine->kmaps;
376 struct maps *maps = &kmaps->maps[MAP__FUNCTION];
377 union perf_event *event = zalloc((sizeof(event->mmap) +
378 machine->id_hdr_size));
379 if (event == NULL) {
380 pr_debug("Not enough memory synthesizing mmap event "
381 "for kernel modules\n");
382 return -1;
383 }
384
385 event->header.type = PERF_RECORD_MMAP;
386
387 /*
388 * kernel uses 0 for user space maps, see kernel/perf_event.c
389 * __perf_event_mmap
390 */
391 if (machine__is_host(machine))
392 event->header.misc = PERF_RECORD_MISC_KERNEL;
393 else
394 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
395
396 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
397 size_t size;
398
399 if (__map__is_kernel(pos))
400 continue;
401
402 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
403 event->mmap.header.type = PERF_RECORD_MMAP;
404 event->mmap.header.size = (sizeof(event->mmap) -
405 (sizeof(event->mmap.filename) - size));
406 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
407 event->mmap.header.size += machine->id_hdr_size;
408 event->mmap.start = pos->start;
409 event->mmap.len = pos->end - pos->start;
410 event->mmap.pid = machine->pid;
411
412 memcpy(event->mmap.filename, pos->dso->long_name,
413 pos->dso->long_name_len + 1);
414 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
415 rc = -1;
416 break;
417 }
418 }
419
420 free(event);
421 return rc;
422}
423
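/*
 * With full == 0 only COMM and MMAP events for the given pid are
 * synthesized; with full != 0 every task under /proc/<pid>/task gets
 * FORK and COMM events, and MMAP events are synthesized only for the
 * task matching the passed-in pid (normally the thread group leader).
 */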
424static int __event__synthesize_thread(union perf_event *comm_event,
425 union perf_event *mmap_event,
426 union perf_event *fork_event,
427 pid_t pid, int full,
428 perf_event__handler_t process,
429 struct perf_tool *tool,
430 struct machine *machine,
431 bool mmap_data,
432 unsigned int proc_map_timeout)
433{
434 char filename[PATH_MAX];
435 DIR *tasks;
436 struct dirent *dirent;
437 pid_t tgid, ppid;
438 int rc = 0;
439
440 /* special case: only send one comm event using passed in pid */
441 if (!full) {
442 tgid = perf_event__synthesize_comm(tool, comm_event, pid,
443 process, machine);
444
445 if (tgid == -1)
446 return -1;
447
448 return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
449 process, machine, mmap_data,
450 proc_map_timeout);
451 }
452
453 if (machine__is_default_guest(machine))
454 return 0;
455
456 snprintf(filename, sizeof(filename), "%s/proc/%d/task",
457 machine->root_dir, pid);
458
459 tasks = opendir(filename);
460 if (tasks == NULL) {
461 pr_debug("couldn't open %s\n", filename);
462 return 0;
463 }
464
465 while ((dirent = readdir(tasks)) != NULL) {
466 char *end;
467 pid_t _pid;
468
469 _pid = strtol(dirent->d_name, &end, 10);
470 if (*end)
471 continue;
472
473 rc = -1;
474 if (perf_event__prepare_comm(comm_event, _pid, machine,
475 &tgid, &ppid) != 0)
476 break;
477
478 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
479 ppid, process, machine) < 0)
480 break;
481 /*
482 * Send the prepared comm event
483 */
484 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
485 break;
486
487 rc = 0;
488 if (_pid == pid) {
489 /* process the parent's maps too */
490 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
491 process, machine, mmap_data, proc_map_timeout);
492 if (rc)
493 break;
494 }
495 }
496
497 closedir(tasks);
498 return rc;
499}
500
501int perf_event__synthesize_thread_map(struct perf_tool *tool,
502 struct thread_map *threads,
503 perf_event__handler_t process,
504 struct machine *machine,
505 bool mmap_data,
506 unsigned int proc_map_timeout)
507{
508 union perf_event *comm_event, *mmap_event, *fork_event;
509 int err = -1, thread, j;
510
511 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
512 if (comm_event == NULL)
513 goto out;
514
515 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
516 if (mmap_event == NULL)
517 goto out_free_comm;
518
519 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
520 if (fork_event == NULL)
521 goto out_free_mmap;
522
523 err = 0;
524 for (thread = 0; thread < threads->nr; ++thread) {
525 if (__event__synthesize_thread(comm_event, mmap_event,
526 fork_event,
527 thread_map__pid(threads, thread), 0,
528 process, tool, machine,
529 mmap_data, proc_map_timeout)) {
530 err = -1;
531 break;
532 }
533
534 /*
535 * comm.pid is set to thread group id by
536 * perf_event__synthesize_comm
537 */
538 if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
539 bool need_leader = true;
540
541 /* is thread group leader in thread_map? */
542 for (j = 0; j < threads->nr; ++j) {
543 if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
544 need_leader = false;
545 break;
546 }
547 }
548
549 /* if not, generate events for it */
550 if (need_leader &&
551 __event__synthesize_thread(comm_event, mmap_event,
552 fork_event,
553 comm_event->comm.pid, 0,
554 process, tool, machine,
555 mmap_data, proc_map_timeout)) {
556 err = -1;
557 break;
558 }
559 }
560 }
561 free(fork_event);
562out_free_mmap:
563 free(mmap_event);
564out_free_comm:
565 free(comm_event);
566out:
567 return err;
568}
569
570int perf_event__synthesize_threads(struct perf_tool *tool,
571 perf_event__handler_t process,
572 struct machine *machine,
573 bool mmap_data,
574 unsigned int proc_map_timeout)
575{
576 DIR *proc;
577 char proc_path[PATH_MAX];
578 struct dirent *dirent;
579 union perf_event *comm_event, *mmap_event, *fork_event;
580 int err = -1;
581
582 if (machine__is_default_guest(machine))
583 return 0;
584
585 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
586 if (comm_event == NULL)
587 goto out;
588
589 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
590 if (mmap_event == NULL)
591 goto out_free_comm;
592
593 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
594 if (fork_event == NULL)
595 goto out_free_mmap;
596
597 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
598 proc = opendir(proc_path);
599
600 if (proc == NULL)
601 goto out_free_fork;
602
603 while ((dirent = readdir(proc)) != NULL) {
604 char *end;
605 pid_t pid = strtol(dirent->d_name, &end, 10);
606
607 if (*end) /* only interested in proper numerical dirents */
608 continue;
609 /*
610 * We may race with an exiting thread, so don't stop just because
611 * one thread couldn't be synthesized.
612 */
613 __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
614 1, process, tool, machine, mmap_data,
615 proc_map_timeout);
616 }
617
618 err = 0;
619 closedir(proc);
620out_free_fork:
621 free(fork_event);
622out_free_mmap:
623 free(mmap_event);
624out_free_comm:
625 free(comm_event);
626out:
627 return err;
628}
629
630struct process_symbol_args {
631 const char *name;
632 u64 start;
633};
634
635static int find_symbol_cb(void *arg, const char *name, char type,
636 u64 start)
637{
638 struct process_symbol_args *args = arg;
639
640 /*
641 * Must be a function or at least an alias, as on PARISC64, where "_text" is
642 * an 'A' (absolute) symbol at the same address as "_stext".
643 */
644 if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
645 type == 'A') || strcmp(name, args->name))
646 return 0;
647
648 args->start = start;
649 return 1;
650}
651
652u64 kallsyms__get_function_start(const char *kallsyms_filename,
653 const char *symbol_name)
654{
655 struct process_symbol_args args = { .name = symbol_name, };
656
657 if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
658 return 0;
659
660 return args.start;
661}
662
663int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
664 perf_event__handler_t process,
665 struct machine *machine)
666{
667 size_t size;
668 const char *mmap_name;
669 char name_buff[PATH_MAX];
670 struct map *map = machine__kernel_map(machine);
671 struct kmap *kmap;
672 int err;
673 union perf_event *event;
674
675 if (map == NULL)
676 return -1;
677
678 /*
679 * We should get this from /sys/kernel/sections/.text, but until that is
680 * available use this approach, and once it is, keep it as a fallback for
681 * older kernels.
682 */
683 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
684 if (event == NULL) {
685 pr_debug("Not enough memory synthesizing mmap event "
686 "for the kernel\n");
687 return -1;
688 }
689
690 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
691 if (machine__is_host(machine)) {
692 /*
693 * kernel uses PERF_RECORD_MISC_USER for user space maps,
694 * see kernel/perf_event.c __perf_event_mmap
695 */
696 event->header.misc = PERF_RECORD_MISC_KERNEL;
697 } else {
698 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
699 }
700
701 kmap = map__kmap(map);
702 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
703 "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
704 size = PERF_ALIGN(size, sizeof(u64));
705 event->mmap.header.type = PERF_RECORD_MMAP;
706 event->mmap.header.size = (sizeof(event->mmap) -
707 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
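/*
 * pgoff carries the address of the reference relocation symbol
 * (e.g. _text), which the report side can use to adjust kernel symbol
 * addresses when the recorded kernel was relocated (e.g. KASLR).
 */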
708 event->mmap.pgoff = kmap->ref_reloc_sym->addr;
709 event->mmap.start = map->start;
710 event->mmap.len = map->end - event->mmap.start;
711 event->mmap.pid = machine->pid;
712
713 err = perf_tool__process_synth_event(tool, event, machine, process);
714 free(event);
715
716 return err;
717}
718
719int perf_event__synthesize_thread_map2(struct perf_tool *tool,
720 struct thread_map *threads,
721 perf_event__handler_t process,
722 struct machine *machine)
723{
724 union perf_event *event;
725 int i, err, size;
726
727 size = sizeof(event->thread_map);
728 size += threads->nr * sizeof(event->thread_map.entries[0]);
729
730 event = zalloc(size);
731 if (!event)
732 return -ENOMEM;
733
734 event->header.type = PERF_RECORD_THREAD_MAP;
735 event->header.size = size;
736 event->thread_map.nr = threads->nr;
737
738 for (i = 0; i < threads->nr; i++) {
739 struct thread_map_event_entry *entry = &event->thread_map.entries[i];
740 char *comm = thread_map__comm(threads, i);
741
742 if (!comm)
743 comm = (char *) "";
744
745 entry->pid = thread_map__pid(threads, i);
746 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
747 }
748
749 err = process(tool, event, NULL, machine);
750
751 free(event);
752 return err;
753}
754
755static void synthesize_cpus(struct cpu_map_entries *cpus,
756 struct cpu_map *map)
757{
758 int i;
759
760 cpus->nr = map->nr;
761
762 for (i = 0; i < map->nr; i++)
763 cpus->cpu[i] = map->map[i];
764}
765
766static void synthesize_mask(struct cpu_map_mask *mask,
767 struct cpu_map *map, int max)
768{
769 int i;
770
771 mask->nr = BITS_TO_LONGS(max);
772 mask->long_size = sizeof(long);
773
774 for (i = 0; i < map->nr; i++)
775 set_bit(map->map[i], mask->mask);
776}
777
778static size_t cpus_size(struct cpu_map *map)
779{
780 return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
781}
782
783static size_t mask_size(struct cpu_map *map, int *max)
784{
785 int i;
786
787 *max = 0;
788
789 for (i = 0; i < map->nr; i++) {
790 /* number of bits needed for this cpu: its bit position + 1 */
791 int bit = map->map[i] + 1;
792
793 if (bit > *max)
794 *max = bit;
795 }
796
797 return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
798}
799
800void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
801{
802 size_t size_cpus, size_mask;
803 bool is_dummy = cpu_map__empty(map);
804
805 /*
806 * Both array and mask data have variable size based
807 * on the number of cpus and their actual values.
808 * The size of the 'struct cpu_map_data' is:
809 *
810 * array = size of 'struct cpu_map_entries' +
811 * number of cpus * sizeof(u16)
812 *
813 * mask = size of 'struct cpu_map_mask' +
814 * maximum cpu bit converted to size of longs
815 *
816 * and finally + the size of 'struct cpu_map_data'.
817 */
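/*
 * Illustrative example (assuming 64-bit longs): for a sparse map such
 * as cpus {0, 72} the array form needs only two u16 entries, while the
 * mask form needs BITS_TO_LONGS(73) == 2 longs, so the array encoding
 * is picked; for a dense map such as cpus 0-127 the mask (two longs)
 * is far smaller than 128 u16 entries, so the mask encoding wins.
 */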
818 size_cpus = cpus_size(map);
819 size_mask = mask_size(map, max);
820
821 if (is_dummy || (size_cpus < size_mask)) {
822 *size += size_cpus;
823 *type = PERF_CPU_MAP__CPUS;
824 } else {
825 *size += size_mask;
826 *type = PERF_CPU_MAP__MASK;
827 }
828
829 *size += sizeof(struct cpu_map_data);
830 return zalloc(*size);
831}
832
833void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
834 u16 type, int max)
835{
836 data->type = type;
837
838 switch (type) {
839 case PERF_CPU_MAP__CPUS:
840 synthesize_cpus((struct cpu_map_entries *) data->data, map);
841 break;
842 case PERF_CPU_MAP__MASK:
843 synthesize_mask((struct cpu_map_mask *) data->data, map, max);
844 default:
845 break;
846 };
847}
848
849static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
850{
851 size_t size = sizeof(struct cpu_map_event);
852 struct cpu_map_event *event;
853 int max;
854 u16 type;
855
856 event = cpu_map_data__alloc(map, &size, &type, &max);
857 if (!event)
858 return NULL;
859
860 event->header.type = PERF_RECORD_CPU_MAP;
861 event->header.size = size;
862 event->data.type = type;
863
864 cpu_map_data__synthesize(&event->data, map, type, max);
865 return event;
866}
867
868int perf_event__synthesize_cpu_map(struct perf_tool *tool,
869 struct cpu_map *map,
870 perf_event__handler_t process,
871 struct machine *machine)
872{
873 struct cpu_map_event *event;
874 int err;
875
876 event = cpu_map_event__new(map);
877 if (!event)
878 return -ENOMEM;
879
880 err = process(tool, (union perf_event *) event, NULL, machine);
881
882 free(event);
883 return err;
884}
885
886int perf_event__synthesize_stat_config(struct perf_tool *tool,
887 struct perf_stat_config *config,
888 perf_event__handler_t process,
889 struct machine *machine)
890{
891 struct stat_config_event *event;
892 int size, i = 0, err;
893
894 size = sizeof(*event);
895 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
896
897 event = zalloc(size);
898 if (!event)
899 return -ENOMEM;
900
901 event->header.type = PERF_RECORD_STAT_CONFIG;
902 event->header.size = size;
903 event->nr = PERF_STAT_CONFIG_TERM__MAX;
904
905#define ADD(__term, __val) \
906 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \
907 event->data[i].val = __val; \
908 i++;
909
910 ADD(AGGR_MODE, config->aggr_mode)
911 ADD(INTERVAL, config->interval)
912 ADD(SCALE, config->scale)
913
914 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
915 "stat config terms unbalanced\n");
916#undef ADD
917
918 err = process(tool, (union perf_event *) event, NULL, machine);
919
920 free(event);
921 return err;
922}
923
924int perf_event__synthesize_stat(struct perf_tool *tool,
925 u32 cpu, u32 thread, u64 id,
926 struct perf_counts_values *count,
927 perf_event__handler_t process,
928 struct machine *machine)
929{
930 struct stat_event event;
931
932 event.header.type = PERF_RECORD_STAT;
933 event.header.size = sizeof(event);
934 event.header.misc = 0;
935
936 event.id = id;
937 event.cpu = cpu;
938 event.thread = thread;
939 event.val = count->val;
940 event.ena = count->ena;
941 event.run = count->run;
942
943 return process(tool, (union perf_event *) &event, NULL, machine);
944}
945
946int perf_event__synthesize_stat_round(struct perf_tool *tool,
947 u64 evtime, u64 type,
948 perf_event__handler_t process,
949 struct machine *machine)
950{
951 struct stat_round_event event;
952
953 event.header.type = PERF_RECORD_STAT_ROUND;
954 event.header.size = sizeof(event);
955 event.header.misc = 0;
956
957 event.time = evtime;
958 event.type = type;
959
960 return process(tool, (union perf_event *) &event, NULL, machine);
961}
962
963void perf_event__read_stat_config(struct perf_stat_config *config,
964 struct stat_config_event *event)
965{
966 unsigned i;
967
968 for (i = 0; i < event->nr; i++) {
969
970 switch (event->data[i].tag) {
971#define CASE(__term, __val) \
972 case PERF_STAT_CONFIG_TERM__##__term: \
973 config->__val = event->data[i].val; \
974 break;
975
976 CASE(AGGR_MODE, aggr_mode)
977 CASE(SCALE, scale)
978 CASE(INTERVAL, interval)
979#undef CASE
980 default:
981 pr_warning("unknown stat config term %" PRIu64 "\n",
982 event->data[i].tag);
983 }
984 }
985}
986
987size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
988{
989 const char *s;
990
991 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
992 s = " exec";
993 else
994 s = "";
995
996 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
997}
998
999int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1000 union perf_event *event,
1001 struct perf_sample *sample,
1002 struct machine *machine)
1003{
1004 return machine__process_comm_event(machine, event, sample);
1005}
1006
1007int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1008 union perf_event *event,
1009 struct perf_sample *sample,
1010 struct machine *machine)
1011{
1012 return machine__process_lost_event(machine, event, sample);
1013}
1014
1015int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1016 union perf_event *event,
1017 struct perf_sample *sample __maybe_unused,
1018 struct machine *machine)
1019{
1020 return machine__process_aux_event(machine, event);
1021}
1022
1023int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1024 union perf_event *event,
1025 struct perf_sample *sample __maybe_unused,
1026 struct machine *machine)
1027{
1028 return machine__process_itrace_start_event(machine, event);
1029}
1030
1031int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1032 union perf_event *event,
1033 struct perf_sample *sample,
1034 struct machine *machine)
1035{
1036 return machine__process_lost_samples_event(machine, event, sample);
1037}
1038
1039int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1040 union perf_event *event,
1041 struct perf_sample *sample __maybe_unused,
1042 struct machine *machine)
1043{
1044 return machine__process_switch_event(machine, event);
1045}
1046
1047size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1048{
1049 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1050 event->mmap.pid, event->mmap.tid, event->mmap.start,
1051 event->mmap.len, event->mmap.pgoff,
1052 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1053 event->mmap.filename);
1054}
1055
1056size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1057{
1058 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1059 " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1060 event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1061 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1062 event->mmap2.min, event->mmap2.ino,
1063 event->mmap2.ino_generation,
1064 (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1065 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1066 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1067 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1068 event->mmap2.filename);
1069}
1070
1071size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1072{
1073 struct thread_map *threads = thread_map__new_event(&event->thread_map);
1074 size_t ret;
1075
1076 ret = fprintf(fp, " nr: ");
1077
1078 if (threads)
1079 ret += thread_map__fprintf(threads, fp);
1080 else
1081 ret += fprintf(fp, "failed to get threads from event\n");
1082
1083 thread_map__put(threads);
1084 return ret;
1085}
1086
1087size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1088{
1089 struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1090 size_t ret;
1091
1092 ret = fprintf(fp, " nr: ");
1093
1094 if (cpus)
1095 ret += cpu_map__fprintf(cpus, fp);
1096 else
1097 ret += fprintf(fp, "failed to get cpumap from event\n");
1098
1099 cpu_map__put(cpus);
1100 return ret;
1101}
1102
1103int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1104 union perf_event *event,
1105 struct perf_sample *sample,
1106 struct machine *machine)
1107{
1108 return machine__process_mmap_event(machine, event, sample);
1109}
1110
1111int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1112 union perf_event *event,
1113 struct perf_sample *sample,
1114 struct machine *machine)
1115{
1116 return machine__process_mmap2_event(machine, event, sample);
1117}
1118
1119size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1120{
1121 return fprintf(fp, "(%d:%d):(%d:%d)\n",
1122 event->fork.pid, event->fork.tid,
1123 event->fork.ppid, event->fork.ptid);
1124}
1125
1126int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1127 union perf_event *event,
1128 struct perf_sample *sample,
1129 struct machine *machine)
1130{
1131 return machine__process_fork_event(machine, event, sample);
1132}
1133
1134int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1135 union perf_event *event,
1136 struct perf_sample *sample,
1137 struct machine *machine)
1138{
1139 return machine__process_exit_event(machine, event, sample);
1140}
1141
1142size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1143{
1144 return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
1145 event->aux.aux_offset, event->aux.aux_size,
1146 event->aux.flags,
1147 event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1148 event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
1149}
1150
1151size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1152{
1153 return fprintf(fp, " pid: %u tid: %u\n",
1154 event->itrace_start.pid, event->itrace_start.tid);
1155}
1156
1157size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1158{
1159 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1160 const char *in_out = out ? "OUT" : "IN ";
1161
1162 if (event->header.type == PERF_RECORD_SWITCH)
1163 return fprintf(fp, " %s\n", in_out);
1164
1165 return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
1166 in_out, out ? "next" : "prev",
1167 event->context_switch.next_prev_pid,
1168 event->context_switch.next_prev_tid);
1169}
1170
1171size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1172{
1173 size_t ret = fprintf(fp, "PERF_RECORD_%s",
1174 perf_event__name(event->header.type));
1175
1176 switch (event->header.type) {
1177 case PERF_RECORD_COMM:
1178 ret += perf_event__fprintf_comm(event, fp);
1179 break;
1180 case PERF_RECORD_FORK:
1181 case PERF_RECORD_EXIT:
1182 ret += perf_event__fprintf_task(event, fp);
1183 break;
1184 case PERF_RECORD_MMAP:
1185 ret += perf_event__fprintf_mmap(event, fp);
1186 break;
1187 case PERF_RECORD_MMAP2:
1188 ret += perf_event__fprintf_mmap2(event, fp);
1189 break;
1190 case PERF_RECORD_AUX:
1191 ret += perf_event__fprintf_aux(event, fp);
1192 break;
1193 case PERF_RECORD_ITRACE_START:
1194 ret += perf_event__fprintf_itrace_start(event, fp);
1195 break;
1196 case PERF_RECORD_SWITCH:
1197 case PERF_RECORD_SWITCH_CPU_WIDE:
1198 ret += perf_event__fprintf_switch(event, fp);
1199 break;
1200 default:
1201 ret += fprintf(fp, "\n");
1202 }
1203
1204 return ret;
1205}
1206
1207int perf_event__process(struct perf_tool *tool __maybe_unused,
1208 union perf_event *event,
1209 struct perf_sample *sample,
1210 struct machine *machine)
1211{
1212 return machine__process_event(machine, event, sample);
1213}
1214
1215void thread__find_addr_map(struct thread *thread, u8 cpumode,
1216 enum map_type type, u64 addr,
1217 struct addr_location *al)
1218{
1219 struct map_groups *mg = thread->mg;
1220 struct machine *machine = mg->machine;
1221 bool load_map = false;
1222
1223 al->machine = machine;
1224 al->thread = thread;
1225 al->addr = addr;
1226 al->cpumode = cpumode;
1227 al->filtered = 0;
1228
1229 if (machine == NULL) {
1230 al->map = NULL;
1231 return;
1232 }
1233
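/*
 * al->level encodes where the address lives: 'k' host kernel,
 * '.' host user space, 'g' guest kernel, 'u' guest user space, and
 * 'H' for addresses we cannot resolve (hypervisor, or a mode this
 * session is not recording).
 */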
1234 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1235 al->level = 'k';
1236 mg = &machine->kmaps;
1237 load_map = true;
1238 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1239 al->level = '.';
1240 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1241 al->level = 'g';
1242 mg = &machine->kmaps;
1243 load_map = true;
1244 } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1245 al->level = 'u';
1246 } else {
1247 al->level = 'H';
1248 al->map = NULL;
1249
1250 if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1251 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1252 !perf_guest)
1253 al->filtered |= (1 << HIST_FILTER__GUEST);
1254 if ((cpumode == PERF_RECORD_MISC_USER ||
1255 cpumode == PERF_RECORD_MISC_KERNEL) &&
1256 !perf_host)
1257 al->filtered |= (1 << HIST_FILTER__HOST);
1258
1259 return;
1260 }
1261try_again:
1262 al->map = map_groups__find(mg, type, al->addr);
1263 if (al->map == NULL) {
1264 /*
1265 * If this is outside of all known maps, and is a negative
1266 * address, try to look it up in the kernel dso, as it might be
1267 * a vsyscall or vdso (which executes in user-mode).
1268 *
1269 * XXX This is nasty, we should have a symbol list in the
1270 * "[vdso]" dso, but for now lets use the old trick of looking
1271 * in the whole kernel symbol list.
1272 */
1273 if (cpumode == PERF_RECORD_MISC_USER && machine &&
1274 mg != &machine->kmaps &&
1275 machine__kernel_ip(machine, al->addr)) {
1276 mg = &machine->kmaps;
1277 load_map = true;
1278 goto try_again;
1279 }
1280 } else {
1281 /*
1282 * Kernel maps might be changed when loading symbols so loading
1283 * must be done prior to using kernel maps.
1284 */
1285 if (load_map)
1286 map__load(al->map, machine->symbol_filter);
1287 al->addr = al->map->map_ip(al->map, al->addr);
1288 }
1289}
1290
1291void thread__find_addr_location(struct thread *thread,
1292 u8 cpumode, enum map_type type, u64 addr,
1293 struct addr_location *al)
1294{
1295 thread__find_addr_map(thread, cpumode, type, addr, al);
1296 if (al->map != NULL)
1297 al->sym = map__find_symbol(al->map, al->addr,
1298 thread->mg->machine->symbol_filter);
1299 else
1300 al->sym = NULL;
1301}
1302
1303/*
1304 * Callers need to drop the reference to al->thread, obtained in
1305 * machine__findnew_thread()
1306 */
1307int machine__resolve(struct machine *machine, struct addr_location *al,
1308 struct perf_sample *sample)
1309{
1310 struct thread *thread = machine__findnew_thread(machine, sample->pid,
1311 sample->tid);
1312
1313 if (thread == NULL)
1314 return -1;
1315
1316 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1317 /*
1318 * Have we already created the kernel maps for this machine?
1319 *
1320 * This should have happened earlier, when we processed the kernel MMAP
1321 * events, but for older perf.data files there was no such thing, so do
1322 * it now.
1323 */
1324 if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
1325 machine__kernel_map(machine) == NULL)
1326 machine__create_kernel_maps(machine);
1327
1328 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
1329 dump_printf(" ...... dso: %s\n",
1330 al->map ? al->map->dso->long_name :
1331 al->level == 'H' ? "[hypervisor]" : "<not found>");
1332
1333 if (thread__is_filtered(thread))
1334 al->filtered |= (1 << HIST_FILTER__THREAD);
1335
1336 al->sym = NULL;
1337 al->cpu = sample->cpu;
1338 al->socket = -1;
1339
1340 if (al->cpu >= 0) {
1341 struct perf_env *env = machine->env;
1342
1343 if (env && env->cpu)
1344 al->socket = env->cpu[al->cpu].socket_id;
1345 }
1346
1347 if (al->map) {
1348 struct dso *dso = al->map->dso;
1349
1350 if (symbol_conf.dso_list &&
1351 (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1352 dso->short_name) ||
1353 (dso->short_name != dso->long_name &&
1354 strlist__has_entry(symbol_conf.dso_list,
1355 dso->long_name))))) {
1356 al->filtered |= (1 << HIST_FILTER__DSO);
1357 }
1358
1359 al->sym = map__find_symbol(al->map, al->addr,
1360 machine->symbol_filter);
1361 }
1362
1363 if (symbol_conf.sym_list &&
1364 (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1365 al->sym->name))) {
1366 al->filtered |= (1 << HIST_FILTER__SYMBOL);
1367 }
1368
1369 return 0;
1370}
1371
1372/*
1373 * The preprocess_sample method will return with reference counts taken on
1374 * the entries in it; when done using them (and perhaps after taking extra
1375 * ref counts if a pointer to one of those entries needs to be kept) it must
1376 * be paired with addr_location__put(), so that the refcounts can be decremented.
1377 */
1378void addr_location__put(struct addr_location *al)
1379{
1380 thread__zput(al->thread);
1381}
1382
1383bool is_bts_event(struct perf_event_attr *attr)
1384{
1385 return attr->type == PERF_TYPE_HARDWARE &&
1386 (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1387 attr->sample_period == 1;
1388}
1389
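/*
 * These are the events whose PERF_SAMPLE_ADDR payload points at
 * something a map/symbol lookup makes sense for: the faulting address
 * for the page fault software events and, presumably, the branch
 * target for BTS-style branch events.
 */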
1390bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1391{
1392 if (attr->type == PERF_TYPE_SOFTWARE &&
1393 (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1394 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1395 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1396 return true;
1397
1398 if (is_bts_event(attr))
1399 return true;
1400
1401 return false;
1402}
1403
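/*
 * Resolve sample->addr (the data address) rather than sample->ip: try
 * the function maps first and fall back to the variable maps, since a
 * data address may live in a data mapping.
 */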
1404void thread__resolve(struct thread *thread, struct addr_location *al,
1405 struct perf_sample *sample)
1406{
1407 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1408 if (!al->map)
1409 thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1410 sample->addr, al);
1411
1412 al->cpu = sample->cpu;
1413 al->sym = NULL;
1414
1415 if (al->map)
1416 al->sym = map__find_symbol(al->map, al->addr, NULL);
1417}