#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0) {
			pr_err("incompatible file format (rerun with -v to learn more)\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
}
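
/*
 * Note: perf_session__update_sample_type() must be re-run whenever the
 * evlist changes, e.g. after a PERF_RECORD_HEADER_ATTR is processed in
 * pipe mode (see perf_session__process_user_event() below), otherwise the
 * cached sample_size/id_hdr_size would go stale.
 */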

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
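
/*
 * A minimal usage sketch (assuming a perf_tool with its handlers already
 * set up):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &tool);
 *	perf_session__delete(session);
 */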

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* Fetch the next node before erasing this one. */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES ARRAY_SIZE(cpumodes)

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries,
		 * so we have to try each mode in turn until we find a match;
		 * otherwise the symbol remains unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}
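
/*
 * Note: the returned array is calloc'd with bs->nr entries; ownership
 * passes to the caller, which is expected to free() it.
 */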

int machine__resolve_callchain(struct machine *self,
			       struct perf_evsel *evsel __used,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
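
/*
 * Both helpers swap in place and assume byte_size is a multiple of the
 * element size, e.g. mem_bswap_64(buf, 16) turns two u64s of
 * 0x0102030405060708 into 0x0807060504030201 each. The BUG_ON() in
 * swap_sample_id_all() below enforces this for the 64-bit case.
 */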

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __used)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
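
/*
 * revbyte() mirrors the bits of a single byte, e.g.:
 *
 *	revbyte(0x01) == 0x80	(00000001 -> 10000000)
 *	revbyte(0xb2) == 0x4d	(10110010 -> 01001101)
 */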
545
546/*
547 * XXX this is hack in attempt to carry flags bitfield
548 * throught endian village. ABI says:
549 *
550 * Bit-fields are allocated from right to left (least to most significant)
551 * on little-endian implementations and from left to right (most to least
552 * significant) on big-endian implementations.
553 *
554 * The above seems to be byte specific, so we need to reverse each
555 * byte of the bitfield. 'Internet' also says this might be implementation
556 * specific and we probably need proper fix and carry perf_event_attr
557 * bitfield flags in separate data file FEAT_ section. Thought this seems
558 * to work for now.
559 */
560static void swap_bitfield(u8 *p, unsigned len)
561{
562 unsigned i;
563
564 for (i = 0; i < len; i++) {
565 *p = revbyte(*p);
566 p++;
567 }
568}
569
570/* exported for swapping attributes in file header */
571void perf_event__attr_swap(struct perf_event_attr *attr)
572{
573 attr->type = bswap_32(attr->type);
574 attr->size = bswap_32(attr->size);
575 attr->config = bswap_64(attr->config);
576 attr->sample_period = bswap_64(attr->sample_period);
577 attr->sample_type = bswap_64(attr->sample_type);
578 attr->read_format = bswap_64(attr->read_format);
579 attr->wakeup_events = bswap_32(attr->wakeup_events);
580 attr->bp_type = bswap_32(attr->bp_type);
581 attr->bp_addr = bswap_64(attr->bp_addr);
582 attr->bp_len = bswap_64(attr->bp_len);
583
584 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
585}
586
587static void perf_event__hdr_attr_swap(union perf_event *event,
588 bool sample_id_all __used)
589{
590 size_t size;
591
592 perf_event__attr_swap(&event->attr.attr);
593
594 size = event->header.size;
595 size -= (void *)&event->attr.id - (void *)event;
596 mem_bswap_64(event->attr.id, size);
597}
598
599static void perf_event__event_type_swap(union perf_event *event,
600 bool sample_id_all __used)
601{
602 event->event_type.event_type.event_id =
603 bswap_64(event->event_type.event_type.event_id);
604}
605
606static void perf_event__tracing_data_swap(union perf_event *event,
607 bool sample_id_all __used)
608{
609 event->tracing_data.size = bswap_32(event->tracing_data.size);
610}
611
612typedef void (*perf_event__swap_op)(union perf_event *event,
613 bool sample_id_all);
614
615static perf_event__swap_op perf_event__swap_ops[] = {
616 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
617 [PERF_RECORD_COMM] = perf_event__comm_swap,
618 [PERF_RECORD_FORK] = perf_event__task_swap,
619 [PERF_RECORD_EXIT] = perf_event__task_swap,
620 [PERF_RECORD_LOST] = perf_event__all64_swap,
621 [PERF_RECORD_READ] = perf_event__read_swap,
622 [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
623 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
624 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
625 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
626 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
627 [PERF_RECORD_HEADER_MAX] = NULL,
628};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass over all the buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}
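
/*
 * Starting the walk from last_sample keeps insertion O(1) for the common
 * case of an almost sorted stream: most new events land right next to
 * the previously queued one, so the list walk above rarely goes far.
 */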

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
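
/*
 * Note on the allocator above: entry 0 of each chunk is reserved as the
 * list node linking the chunk into os->to_free, which is why the first
 * entry handed out is sample_buffer + 1 and sample_buffer_idx starts
 * at 2.
 */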

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, session->sample_id_all);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = sizeof(struct perf_event_header);

	if (size > cur_size) {
		void *new = realloc(buf, size);

		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}
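
/*
 * The two bounds checks matter: first that the header itself fits in the
 * current map, then that the whole event does. A NULL return tells the
 * caller to munmap() and remap further into the file.
 */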

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
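
/*
 * A worked example of the remap arithmetic above, assuming 4KB pages and
 * data_offset = 0x1234: page_offset = 0x1000, so file_offset = 0x1000
 * and head = 0x234. The mmap() offset stays page aligned while event
 * parsing still starts exactly at the data.
 */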

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
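
/*
 * A typical caller passes the kernel's relocation reference symbol here,
 * e.g. "_text" (or "[kernel.kallsyms]_text", hence the bracket stripping
 * above), so that kernel maps can later be relocated against kallsyms.
 */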

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", event_name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong. Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, struct perf_evsel *evsel,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}
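
/*
 * A minimal usage sketch:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3,8", cpu_bitmap) < 0)
 *		return -1;
 *
 * and later skip any sample whose sample->cpu is not set in the bitmap
 * (i.e. !test_bit(sample->cpu, cpu_bitmap)).
 */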

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "util.h"
#include "arch/common.h"
#include "units.h"
#include "annotate.h"
#include <internal/lib.h>

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       const struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
		/* Auxiliary events may reference exited threads, hold onto dead ones. */
		symbol_conf.keep_exited_threads = true;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset,
					   event->file_path);
}

struct perf_session *__perf_session__new(struct perf_data *data,
					 struct perf_tool *tool,
					 bool trace_event_repipe)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->trace_event_repipe = trace_event_repipe;
	session->tool = tool;
	session->decomp_data.zstd_decomp = &session->zstd_data;
	session->active_decomp = &session->decomp_data;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}
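
/*
 * A minimal usage sketch for read mode (assuming an already initialized
 * struct perf_tool):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = __perf_session__new(&data, &tool, false);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 */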

static void perf_decomp__release_events(struct decomp *next)
{
	struct decomp *decomp;
	size_t mmap_len;

	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	debuginfo_cache__delete();
	perf_session__destroy_kernel_maps(session);
	perf_decomp__release_events(session->decomp_data.decomp);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data) {
		if (perf_data__is_read(session->data))
			evlist__delete(session->evlist);
		perf_data__close(session->data);
	}
#ifdef HAVE_LIBTRACEEVENT
	trace_event__cleanup(&session->tevent);
#endif
	free(session);
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		event->mmap2.maj = bswap_32(event->mmap2.maj);
		event->mmap2.min = bswap_32(event->mmap2.min);
		event->mmap2.ino = bswap_64(event->mmap2.ino);
		event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
	}

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
331static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
332{
333 event->fork.pid = bswap_32(event->fork.pid);
334 event->fork.tid = bswap_32(event->fork.tid);
335 event->fork.ppid = bswap_32(event->fork.ppid);
336 event->fork.ptid = bswap_32(event->fork.ptid);
337 event->fork.time = bswap_64(event->fork.time);
338
339 if (sample_id_all)
340 swap_sample_id_all(event, &event->fork + 1);
341}
342
343static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
344{
345 event->read.pid = bswap_32(event->read.pid);
346 event->read.tid = bswap_32(event->read.tid);
347 event->read.value = bswap_64(event->read.value);
348 event->read.time_enabled = bswap_64(event->read.time_enabled);
349 event->read.time_running = bswap_64(event->read.time_running);
350 event->read.id = bswap_64(event->read.id);
351
352 if (sample_id_all)
353 swap_sample_id_all(event, &event->read + 1);
354}
355
356static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
357{
358 event->aux.aux_offset = bswap_64(event->aux.aux_offset);
359 event->aux.aux_size = bswap_64(event->aux.aux_size);
360 event->aux.flags = bswap_64(event->aux.flags);
361
362 if (sample_id_all)
363 swap_sample_id_all(event, &event->aux + 1);
364}
365
366static void perf_event__itrace_start_swap(union perf_event *event,
367 bool sample_id_all)
368{
369 event->itrace_start.pid = bswap_32(event->itrace_start.pid);
370 event->itrace_start.tid = bswap_32(event->itrace_start.tid);
371
372 if (sample_id_all)
373 swap_sample_id_all(event, &event->itrace_start + 1);
374}
375
376static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
377{
378 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
379 event->context_switch.next_prev_pid =
380 bswap_32(event->context_switch.next_prev_pid);
381 event->context_switch.next_prev_tid =
382 bswap_32(event->context_switch.next_prev_tid);
383 }
384
385 if (sample_id_all)
386 swap_sample_id_all(event, &event->context_switch + 1);
387}
388
389static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
390{
391 event->text_poke.addr = bswap_64(event->text_poke.addr);
392 event->text_poke.old_len = bswap_16(event->text_poke.old_len);
393 event->text_poke.new_len = bswap_16(event->text_poke.new_len);
394
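	/*
	 * The trailing sample_id follows the two length fields plus both
	 * code copies, padded up to a u64 boundary.
	 */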
395 if (sample_id_all) {
396 size_t len = sizeof(event->text_poke.old_len) +
397 sizeof(event->text_poke.new_len) +
398 event->text_poke.old_len +
399 event->text_poke.new_len;
400 void *data = &event->text_poke.old_len;
401
402 data += PERF_ALIGN(len, sizeof(u64));
403 swap_sample_id_all(event, data);
404 }
405}
406
407static void perf_event__throttle_swap(union perf_event *event,
408 bool sample_id_all)
409{
410 event->throttle.time = bswap_64(event->throttle.time);
411 event->throttle.id = bswap_64(event->throttle.id);
412 event->throttle.stream_id = bswap_64(event->throttle.stream_id);
413
414 if (sample_id_all)
415 swap_sample_id_all(event, &event->throttle + 1);
416}
417
418static void perf_event__namespaces_swap(union perf_event *event,
419 bool sample_id_all)
420{
421 u64 i;
422
423 event->namespaces.pid = bswap_32(event->namespaces.pid);
424 event->namespaces.tid = bswap_32(event->namespaces.tid);
425 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
426
427 for (i = 0; i < event->namespaces.nr_namespaces; i++) {
428 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
429
430 ns->dev = bswap_64(ns->dev);
431 ns->ino = bswap_64(ns->ino);
432 }
433
434 if (sample_id_all)
435 swap_sample_id_all(event, &event->namespaces.link_info[i]);
436}
437
438static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
439{
440 event->cgroup.id = bswap_64(event->cgroup.id);
441
442 if (sample_id_all) {
443 void *data = &event->cgroup.path;
444
445 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
446 swap_sample_id_all(event, data);
447 }
448}
449
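/*
 * Reverse the bit order within one byte: swap the nibbles, then the bit
 * pairs inside each nibble, then adjacent bits, e.g. 0xb1 (0b10110001)
 * becomes 0x8d (0b10001101).
 */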
450static u8 revbyte(u8 b)
451{
452 int rev = (b >> 4) | ((b & 0xf) << 4);
453 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
454 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
455 return (u8) rev;
456}
457
458/*
459 * XXX this is a hack in an attempt to carry the flags bitfield
460 * through the endian village. The ABI says:
461 *
462 * Bit-fields are allocated from right to left (least to most significant)
463 * on little-endian implementations and from left to right (most to least
464 * significant) on big-endian implementations.
465 *
466 * The above seems to be byte specific, so we need to reverse each
467 * byte of the bitfield. The 'Internet' also says this might be
468 * implementation specific and we probably need a proper fix: carry the
469 * perf_event_attr bitfield flags in a separate data file FEAT_ section.
470 * Though this seems to work for now.
471 */
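/*
 * Illustrative example (flag names from perf_event_attr, values made up):
 * with disabled=1, inherit=0, pinned=1 the first flags byte reads
 * 0b00000101 on little endian but 0b10100000 on big endian, which is
 * exactly the bit-reversed byte that revbyte() produces.
 */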
472static void swap_bitfield(u8 *p, unsigned len)
473{
474 unsigned i;
475
476 for (i = 0; i < len; i++) {
477 *p = revbyte(*p);
478 p++;
479 }
480}
481
482/* exported for swapping attributes in file header */
483void perf_event__attr_swap(struct perf_event_attr *attr)
484{
485 attr->type = bswap_32(attr->type);
486 attr->size = bswap_32(attr->size);
487
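/*
 * Older perf.data files may carry a shorter perf_event_attr, so a field
 * is only swapped after checking that attr->size actually covers it.
 */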
488#define bswap_safe(f, n) \
489 (attr->size > (offsetof(struct perf_event_attr, f) + \
490 sizeof(attr->f) * (n)))
491#define bswap_field(f, sz) \
492do { \
493 if (bswap_safe(f, 0)) \
494 attr->f = bswap_##sz(attr->f); \
495} while (0)
496#define bswap_field_16(f) bswap_field(f, 16)
497#define bswap_field_32(f) bswap_field(f, 32)
498#define bswap_field_64(f) bswap_field(f, 64)
499
500 bswap_field_64(config);
501 bswap_field_64(sample_period);
502 bswap_field_64(sample_type);
503 bswap_field_64(read_format);
504 bswap_field_32(wakeup_events);
505 bswap_field_32(bp_type);
506 bswap_field_64(bp_addr);
507 bswap_field_64(bp_len);
508 bswap_field_64(branch_sample_type);
509 bswap_field_64(sample_regs_user);
510 bswap_field_32(sample_stack_user);
511 bswap_field_32(aux_watermark);
512 bswap_field_16(sample_max_stack);
513 bswap_field_32(aux_sample_size);
514
515	/*
516	 * The fields after read_format are bitfields. Check read_format
517	 * because offsetof() cannot be used on a bitfield.
518	 */
519 if (bswap_safe(read_format, 1))
520 swap_bitfield((u8 *) (&attr->read_format + 1),
521 sizeof(u64));
522#undef bswap_field_64
523#undef bswap_field_32
524#undef bswap_field
525#undef bswap_safe
526}
527
528static void perf_event__hdr_attr_swap(union perf_event *event,
529 bool sample_id_all __maybe_unused)
530{
531 size_t size;
532
533 perf_event__attr_swap(&event->attr.attr);
534
535 size = event->header.size;
536 size -= perf_record_header_attr_id(event) - (void *)event;
537 mem_bswap_64(perf_record_header_attr_id(event), size);
538}
539
540static void perf_event__event_update_swap(union perf_event *event,
541 bool sample_id_all __maybe_unused)
542{
543 event->event_update.type = bswap_64(event->event_update.type);
544 event->event_update.id = bswap_64(event->event_update.id);
545}
546
547static void perf_event__event_type_swap(union perf_event *event,
548 bool sample_id_all __maybe_unused)
549{
550 event->event_type.event_type.event_id =
551 bswap_64(event->event_type.event_type.event_id);
552}
553
554static void perf_event__tracing_data_swap(union perf_event *event,
555 bool sample_id_all __maybe_unused)
556{
557 event->tracing_data.size = bswap_32(event->tracing_data.size);
558}
559
560static void perf_event__auxtrace_info_swap(union perf_event *event,
561 bool sample_id_all __maybe_unused)
562{
563 size_t size;
564
565 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
566
567 size = event->header.size;
568 size -= (void *)&event->auxtrace_info.priv - (void *)event;
569 mem_bswap_64(event->auxtrace_info.priv, size);
570}
571
572static void perf_event__auxtrace_swap(union perf_event *event,
573 bool sample_id_all __maybe_unused)
574{
575 event->auxtrace.size = bswap_64(event->auxtrace.size);
576 event->auxtrace.offset = bswap_64(event->auxtrace.offset);
577 event->auxtrace.reference = bswap_64(event->auxtrace.reference);
578 event->auxtrace.idx = bswap_32(event->auxtrace.idx);
579 event->auxtrace.tid = bswap_32(event->auxtrace.tid);
580 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
581}
582
583static void perf_event__auxtrace_error_swap(union perf_event *event,
584 bool sample_id_all __maybe_unused)
585{
586 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
587 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
588 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
589 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
590 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
591 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
592 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
593 if (event->auxtrace_error.fmt)
594 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
595 if (event->auxtrace_error.fmt >= 2) {
596 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
597 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
598 }
599}
600
601static void perf_event__thread_map_swap(union perf_event *event,
602 bool sample_id_all __maybe_unused)
603{
604 unsigned i;
605
606 event->thread_map.nr = bswap_64(event->thread_map.nr);
607
608 for (i = 0; i < event->thread_map.nr; i++)
609 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
610}
611
612static void perf_event__cpu_map_swap(union perf_event *event,
613 bool sample_id_all __maybe_unused)
614{
615 struct perf_record_cpu_map_data *data = &event->cpu_map.data;
616
617 data->type = bswap_16(data->type);
618
619 switch (data->type) {
620 case PERF_CPU_MAP__CPUS:
621 data->cpus_data.nr = bswap_16(data->cpus_data.nr);
622
623 for (unsigned i = 0; i < data->cpus_data.nr; i++)
624 data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
625 break;
626 case PERF_CPU_MAP__MASK:
627 data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
628
629 switch (data->mask32_data.long_size) {
630 case 4:
631 data->mask32_data.nr = bswap_16(data->mask32_data.nr);
632 for (unsigned i = 0; i < data->mask32_data.nr; i++)
633 data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
634 break;
635 case 8:
636 data->mask64_data.nr = bswap_16(data->mask64_data.nr);
637 for (unsigned i = 0; i < data->mask64_data.nr; i++)
638 data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
639 break;
640 default:
641 pr_err("cpu_map swap: unsupported long size\n");
642 }
643 break;
644 case PERF_CPU_MAP__RANGE_CPUS:
645 data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
646 data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
647 break;
648 default:
649 break;
650 }
651}
652
653static void perf_event__stat_config_swap(union perf_event *event,
654 bool sample_id_all __maybe_unused)
655{
656 u64 size;
657
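	/*
	 * mem_bswap_64() swaps whole u64s while bytes remain, so the extra
	 * byte below makes it also swap the nr field itself, given that the
	 * swap starts at &nr.
	 */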
658 size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
659 size += 1; /* nr item itself */
660 mem_bswap_64(&event->stat_config.nr, size);
661}
662
663static void perf_event__stat_swap(union perf_event *event,
664 bool sample_id_all __maybe_unused)
665{
666 event->stat.id = bswap_64(event->stat.id);
667 event->stat.thread = bswap_32(event->stat.thread);
668 event->stat.cpu = bswap_32(event->stat.cpu);
669 event->stat.val = bswap_64(event->stat.val);
670 event->stat.ena = bswap_64(event->stat.ena);
671 event->stat.run = bswap_64(event->stat.run);
672}
673
674static void perf_event__stat_round_swap(union perf_event *event,
675 bool sample_id_all __maybe_unused)
676{
677 event->stat_round.type = bswap_64(event->stat_round.type);
678 event->stat_round.time = bswap_64(event->stat_round.time);
679}
680
681static void perf_event__time_conv_swap(union perf_event *event,
682 bool sample_id_all __maybe_unused)
683{
684 event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
685 event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
686 event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
687
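	/*
	 * time_cycles and time_mask were added to the event later on, so
	 * event_contains() checks the event size before swapping them.
	 */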
688 if (event_contains(event->time_conv, time_cycles)) {
689 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
690 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
691 }
692}
693
694typedef void (*perf_event__swap_op)(union perf_event *event,
695 bool sample_id_all);
696
697static perf_event__swap_op perf_event__swap_ops[] = {
698 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
699 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
700 [PERF_RECORD_COMM] = perf_event__comm_swap,
701 [PERF_RECORD_FORK] = perf_event__task_swap,
702 [PERF_RECORD_EXIT] = perf_event__task_swap,
703 [PERF_RECORD_LOST] = perf_event__all64_swap,
704 [PERF_RECORD_READ] = perf_event__read_swap,
705 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
706 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
707 [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
708 [PERF_RECORD_AUX] = perf_event__aux_swap,
709 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
710 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
711 [PERF_RECORD_SWITCH] = perf_event__switch_swap,
712 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
713 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
714 [PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
715 [PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
716 [PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap,
717 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
718 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
719 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
720 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
721 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
722 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
723 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
724 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
725 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
726 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
727 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
728 [PERF_RECORD_STAT] = perf_event__stat_swap,
729 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
730 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
731 [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
732 [PERF_RECORD_HEADER_MAX] = NULL,
733};
734
735/*
736 * When perf record finishes a pass on every buffer, it records this pseudo
737 * event.
738 * We record the max timestamp t found in pass n.
739 * Assuming these timestamps are monotonic across cpus, we know that if
740 * a buffer still has events with timestamps below t, they will all be
741 * available and read in pass n + 1.
742 * Hence, when we start to read pass n + 2, we can safely flush every
743 * event with a timestamp below t.
744 *
745 * ============ PASS n =================
746 * CPU 0 | CPU 1
747 * |
748 * cnt1 timestamps | cnt2 timestamps
749 * 1 | 2
750 * 2 | 3
751 * - | 4 <--- max recorded
752 *
753 * ============ PASS n + 1 ==============
754 * CPU 0 | CPU 1
755 * |
756 * cnt1 timestamps | cnt2 timestamps
757 * 3 | 5
758 * 4 | 6
759 * 5 | 7 <---- max recorded
760 *
761 * Flush every event below timestamp 4
762 *
763 * ============ PASS n + 2 ==============
764 * CPU 0 | CPU 1
765 * |
766 * cnt1 timestamps | cnt2 timestamps
767 * 6 | 8
768 * 7 | 9
769 * - | 10
770 *
771 * Flush every event below timestamp 7
772 * etc...
773 */
774int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused,
775 union perf_event *event __maybe_unused,
776 struct ordered_events *oe)
777{
778 if (dump_trace)
779 fprintf(stdout, "\n");
780 return ordered_events__flush(oe, OE_FLUSH__ROUND);
781}
782
783int perf_session__queue_event(struct perf_session *s, union perf_event *event,
784 u64 timestamp, u64 file_offset, const char *file_path)
785{
786 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
787}
788
789static void callchain__lbr_callstack_printf(struct perf_sample *sample)
790{
791 struct ip_callchain *callchain = sample->callchain;
792 struct branch_stack *lbr_stack = sample->branch_stack;
793 struct branch_entry *entries = perf_sample__branch_entries(sample);
794 u64 kernel_callchain_nr = callchain->nr;
795 unsigned int i;
796
797 for (i = 0; i < kernel_callchain_nr; i++) {
798 if (callchain->ips[i] == PERF_CONTEXT_USER)
799 break;
800 }
801
802 if ((i != kernel_callchain_nr) && lbr_stack->nr) {
803 u64 total_nr;
804		/*
805		 * The LBR callstack only captures the user call chain:
806		 * i is the number of kernel call chain entries and
807		 * 1 accounts for PERF_CONTEXT_USER.
808		 *
809		 * The user call chain is stored in LBR registers.
810		 * LBRs are register pairs: the caller is stored
811		 * in the "from" register, while the callee is stored
812		 * in the "to" register.
813		 * For example, for the call stack
814		 * "A"->"B"->"C"->"D",
815		 * the LBR registers will be recorded as
816		 * "C"->"D", "B"->"C", "A"->"B".
817		 * So only the first "to" register and all "from"
818		 * registers are needed to construct the whole stack.
819		 */
820 total_nr = i + 1 + lbr_stack->nr + 1;
821 kernel_callchain_nr = i + 1;
822
823 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
824
825 for (i = 0; i < kernel_callchain_nr; i++)
826 printf("..... %2d: %016" PRIx64 "\n",
827 i, callchain->ips[i]);
828
829 printf("..... %2d: %016" PRIx64 "\n",
830 (int)(kernel_callchain_nr), entries[0].to);
831 for (i = 0; i < lbr_stack->nr; i++)
832 printf("..... %2d: %016" PRIx64 "\n",
833 (int)(i + kernel_callchain_nr + 1), entries[i].from);
834 }
835}
836
837static void callchain__printf(struct evsel *evsel,
838 struct perf_sample *sample)
839{
840 unsigned int i;
841 struct ip_callchain *callchain = sample->callchain;
842
843 if (evsel__has_branch_callstack(evsel))
844 callchain__lbr_callstack_printf(sample);
845
846 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
847
848 for (i = 0; i < callchain->nr; i++)
849 printf("..... %2d: %016" PRIx64 "\n",
850 i, callchain->ips[i]);
851}
852
853static void branch_stack__printf(struct perf_sample *sample,
854 struct evsel *evsel)
855{
856 struct branch_entry *entries = perf_sample__branch_entries(sample);
857 bool callstack = evsel__has_branch_callstack(evsel);
858 u64 *branch_stack_cntr = sample->branch_stack_cntr;
859 uint64_t i;
860
861 if (!callstack) {
862 printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
863 } else {
864		/* The reason for adding 1 to nr is that expanding the
865		 * branch stack generates nr + 1 callstack records, e.g.,
866		 * B()->C()
867		 * A()->B()
868		 * expands to the final callstack:
869		 * C()
870		 * B()
871		 * A()
872		 */
873		printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr + 1);
874 }
875
876 for (i = 0; i < sample->branch_stack->nr; i++) {
877 struct branch_entry *e = &entries[i];
878
879 if (!callstack) {
880 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
881 i, e->from, e->to,
882 (unsigned short)e->flags.cycles,
883 e->flags.mispred ? "M" : " ",
884 e->flags.predicted ? "P" : " ",
885 e->flags.abort ? "A" : " ",
886 e->flags.in_tx ? "T" : " ",
887 (unsigned)e->flags.reserved,
888 get_branch_type(e),
889 e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
890 } else {
891 if (i == 0) {
892 printf("..... %2"PRIu64": %016" PRIx64 "\n"
893 "..... %2"PRIu64": %016" PRIx64 "\n",
894 i, e->to, i+1, e->from);
895 } else {
896 printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
897 }
898 }
899 }
900
901 if (branch_stack_cntr) {
902 unsigned int br_cntr_width, br_cntr_nr;
903
904 perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width);
905 printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
906 sample->branch_stack->nr, br_cntr_width, br_cntr_nr);
907 for (i = 0; i < sample->branch_stack->nr; i++)
908 printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
909 }
910}
911
912static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
913{
914 unsigned rid, i = 0;
915
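	/*
	 * Register values are packed: the i-th value belongs to the i-th
	 * set bit of the mask.
	 */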
916 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
917 u64 val = regs[i++];
918
919 printf(".... %-5s 0x%016" PRIx64 "\n",
920 perf_reg_name(rid, arch), val);
921 }
922}
923
924static const char *regs_abi[] = {
925 [PERF_SAMPLE_REGS_ABI_NONE] = "none",
926 [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
927 [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
928};
929
930static inline const char *regs_dump_abi(struct regs_dump *d)
931{
932 if (d->abi > PERF_SAMPLE_REGS_ABI_64)
933 return "unknown";
934
935 return regs_abi[d->abi];
936}
937
938static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
939{
940 u64 mask = regs->mask;
941
942 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
943 type,
944 mask,
945 regs_dump_abi(regs));
946
947 regs_dump__printf(mask, regs->regs, arch);
948}
949
950static void regs_user__printf(struct perf_sample *sample, const char *arch)
951{
952 struct regs_dump *user_regs = &sample->user_regs;
953
954 if (user_regs->regs)
955 regs__printf("user", user_regs, arch);
956}
957
958static void regs_intr__printf(struct perf_sample *sample, const char *arch)
959{
960 struct regs_dump *intr_regs = &sample->intr_regs;
961
962 if (intr_regs->regs)
963 regs__printf("intr", intr_regs, arch);
964}
965
966static void stack_user__printf(struct stack_dump *dump)
967{
968 printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
969 dump->size, dump->offset);
970}
971
972static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
973{
974 u64 sample_type = __evlist__combined_sample_type(evlist);
975
976 if (event->header.type != PERF_RECORD_SAMPLE &&
977 !evlist__sample_id_all(evlist)) {
978 fputs("-1 -1 ", stdout);
979 return;
980 }
981
982	if (sample_type & PERF_SAMPLE_CPU)
983 printf("%u ", sample->cpu);
984
985 if (sample_type & PERF_SAMPLE_TIME)
986 printf("%" PRIu64 " ", sample->time);
987}
988
989static void sample_read__printf(struct perf_sample *sample, u64 read_format)
990{
991 printf("... sample_read:\n");
992
993 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
994 printf("...... time enabled %016" PRIx64 "\n",
995 sample->read.time_enabled);
996
997 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
998 printf("...... time running %016" PRIx64 "\n",
999 sample->read.time_running);
1000
1001 if (read_format & PERF_FORMAT_GROUP) {
1002 struct sample_read_value *value = sample->read.group.values;
1003
1004 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1005
1006 sample_read_group__for_each(value, sample->read.group.nr, read_format) {
1007 printf("..... id %016" PRIx64
1008 ", value %016" PRIx64,
1009 value->id, value->value);
1010 if (read_format & PERF_FORMAT_LOST)
1011 printf(", lost %" PRIu64, value->lost);
1012 printf("\n");
1013 }
1014 } else {
1015 printf("..... id %016" PRIx64 ", value %016" PRIx64,
1016 sample->read.one.id, sample->read.one.value);
1017 if (read_format & PERF_FORMAT_LOST)
1018 printf(", lost %" PRIu64, sample->read.one.lost);
1019 printf("\n");
1020 }
1021}
1022
1023static void dump_event(struct evlist *evlist, union perf_event *event,
1024 u64 file_offset, struct perf_sample *sample,
1025 const char *file_path)
1026{
1027 if (!dump_trace)
1028 return;
1029
1030 printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
1031 file_offset, file_path, event->header.size, event->header.type);
1032
1033 trace_event(event);
1034 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1035 evlist->trace_event_sample_raw(evlist, event, sample);
1036
1037 if (sample)
1038 evlist__print_tstamp(evlist, event, sample);
1039
1040 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1041 event->header.size, perf_event__name(event->header.type));
1042}
1043
1044char *get_page_size_name(u64 size, char *str)
1045{
1046 if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
1047 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
1048
1049 return str;
1050}
1051
1052static void dump_sample(struct evsel *evsel, union perf_event *event,
1053 struct perf_sample *sample, const char *arch)
1054{
1055 u64 sample_type;
1056 char str[PAGE_SIZE_NAME_LEN];
1057
1058 if (!dump_trace)
1059 return;
1060
1061 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1062 event->header.misc, sample->pid, sample->tid, sample->ip,
1063 sample->period, sample->addr);
1064
1065 sample_type = evsel->core.attr.sample_type;
1066
1067 if (evsel__has_callchain(evsel))
1068 callchain__printf(evsel, sample);
1069
1070 if (evsel__has_br_stack(evsel))
1071 branch_stack__printf(sample, evsel);
1072
1073 if (sample_type & PERF_SAMPLE_REGS_USER)
1074 regs_user__printf(sample, arch);
1075
1076 if (sample_type & PERF_SAMPLE_REGS_INTR)
1077 regs_intr__printf(sample, arch);
1078
1079 if (sample_type & PERF_SAMPLE_STACK_USER)
1080 stack_user__printf(&sample->user_stack);
1081
1082 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1083 printf("... weight: %" PRIu64 "", sample->weight);
1084 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1085 printf(",0x%"PRIx16"", sample->ins_lat);
1086 printf(",0x%"PRIx16"", sample->p_stage_cyc);
1087 }
1088 printf("\n");
1089 }
1090
1091 if (sample_type & PERF_SAMPLE_DATA_SRC)
1092 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1093
1094 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1095 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1096
1097 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1098 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
1099
1100 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1101 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
1102
1103 if (sample_type & PERF_SAMPLE_TRANSACTION)
1104 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1105
1106 if (sample_type & PERF_SAMPLE_READ)
1107 sample_read__printf(sample, evsel->core.attr.read_format);
1108}
1109
1110static void dump_read(struct evsel *evsel, union perf_event *event)
1111{
1112 struct perf_record_read *read_event = &event->read;
1113 u64 read_format;
1114
1115 if (!dump_trace)
1116 return;
1117
1118 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1119 evsel__name(evsel), event->read.value);
1120
1121 if (!evsel)
1122 return;
1123
1124 read_format = evsel->core.attr.read_format;
1125
1126 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1127 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1128
1129 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1130 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1131
1132 if (read_format & PERF_FORMAT_ID)
1133 printf("... id : %" PRI_lu64 "\n", read_event->id);
1134
1135 if (read_format & PERF_FORMAT_LOST)
1136 printf("... lost : %" PRI_lu64 "\n", read_event->lost);
1137}
1138
1139static struct machine *machines__find_for_cpumode(struct machines *machines,
1140 union perf_event *event,
1141 struct perf_sample *sample)
1142{
1143 if (perf_guest &&
1144 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1145 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1146 u32 pid;
1147
1148 if (sample->machine_pid)
1149 pid = sample->machine_pid;
1150 else if (event->header.type == PERF_RECORD_MMAP
1151 || event->header.type == PERF_RECORD_MMAP2)
1152 pid = event->mmap.pid;
1153 else
1154 pid = sample->pid;
1155
1156 /*
1157 * Guest code machine is created as needed and does not use
1158 * DEFAULT_GUEST_KERNEL_ID.
1159 */
1160 if (symbol_conf.guest_code)
1161 return machines__findnew(machines, pid);
1162
1163 return machines__find_guest(machines, pid);
1164 }
1165
1166 return &machines->host;
1167}
1168
1169static int deliver_sample_value(struct evlist *evlist,
1170 const struct perf_tool *tool,
1171 union perf_event *event,
1172 struct perf_sample *sample,
1173 struct sample_read_value *v,
1174 struct machine *machine,
1175 bool per_thread)
1176{
1177 struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1178 struct evsel *evsel;
1179 u64 *storage = NULL;
1180
1181 if (sid) {
1182 storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread);
1183 }
1184
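	/*
	 * PERF_SAMPLE_READ carries raw counter values, so derive the period
	 * as the delta from the value previously stored for this id.
	 */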
1185 if (storage) {
1186 sample->id = v->id;
1187 sample->period = v->value - *storage;
1188 *storage = v->value;
1189 }
1190
1191 if (!storage || sid->evsel == NULL) {
1192 ++evlist->stats.nr_unknown_id;
1193 return 0;
1194 }
1195
1196	/*
1197	 * There's no reason to deliver a sample
1198	 * with a zero period, so bail out.
1199	 */
1200 if (!sample->period)
1201 return 0;
1202
1203 evsel = container_of(sid->evsel, struct evsel, core);
1204 return tool->sample(tool, event, sample, evsel, machine);
1205}
1206
1207static int deliver_sample_group(struct evlist *evlist,
1208 const struct perf_tool *tool,
1209 union perf_event *event,
1210 struct perf_sample *sample,
1211 struct machine *machine,
1212 u64 read_format,
1213 bool per_thread)
1214{
1215 int ret = -EINVAL;
1216 struct sample_read_value *v = sample->read.group.values;
1217
1218 if (tool->dont_split_sample_group)
1219 return deliver_sample_value(evlist, tool, event, sample, v, machine,
1220 per_thread);
1221
1222 sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1223 ret = deliver_sample_value(evlist, tool, event, sample, v,
1224 machine, per_thread);
1225 if (ret)
1226 break;
1227 }
1228
1229 return ret;
1230}
1231
1232static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool,
1233 union perf_event *event, struct perf_sample *sample,
1234 struct evsel *evsel, struct machine *machine)
1235{
1236 /* We know evsel != NULL. */
1237 u64 sample_type = evsel->core.attr.sample_type;
1238 u64 read_format = evsel->core.attr.read_format;
1239 bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core);
1240
1241 /* Standard sample delivery. */
1242 if (!(sample_type & PERF_SAMPLE_READ))
1243 return tool->sample(tool, event, sample, evsel, machine);
1244
1245 /* For PERF_SAMPLE_READ we have either single or group mode. */
1246 if (read_format & PERF_FORMAT_GROUP)
1247 return deliver_sample_group(evlist, tool, event, sample,
1248 machine, read_format, per_thread);
1249 else
1250 return deliver_sample_value(evlist, tool, event, sample,
1251 &sample->read.one, machine,
1252 per_thread);
1253}
1254
1255static int machines__deliver_event(struct machines *machines,
1256 struct evlist *evlist,
1257 union perf_event *event,
1258 struct perf_sample *sample,
1259 const struct perf_tool *tool, u64 file_offset,
1260 const char *file_path)
1261{
1262 struct evsel *evsel;
1263 struct machine *machine;
1264
1265 dump_event(evlist, event, file_offset, sample, file_path);
1266
1267 evsel = evlist__id2evsel(evlist, sample->id);
1268
1269 machine = machines__find_for_cpumode(machines, event, sample);
1270
1271 switch (event->header.type) {
1272 case PERF_RECORD_SAMPLE:
1273 if (evsel == NULL) {
1274 ++evlist->stats.nr_unknown_id;
1275 return 0;
1276 }
1277 if (machine == NULL) {
1278 ++evlist->stats.nr_unprocessable_samples;
1279 dump_sample(evsel, event, sample, perf_env__arch(NULL));
1280 return 0;
1281 }
1282 dump_sample(evsel, event, sample, perf_env__arch(machine->env));
1283 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1284 case PERF_RECORD_MMAP:
1285 return tool->mmap(tool, event, sample, machine);
1286 case PERF_RECORD_MMAP2:
1287 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1288 ++evlist->stats.nr_proc_map_timeout;
1289 return tool->mmap2(tool, event, sample, machine);
1290 case PERF_RECORD_COMM:
1291 return tool->comm(tool, event, sample, machine);
1292 case PERF_RECORD_NAMESPACES:
1293 return tool->namespaces(tool, event, sample, machine);
1294 case PERF_RECORD_CGROUP:
1295 return tool->cgroup(tool, event, sample, machine);
1296 case PERF_RECORD_FORK:
1297 return tool->fork(tool, event, sample, machine);
1298 case PERF_RECORD_EXIT:
1299 return tool->exit(tool, event, sample, machine);
1300 case PERF_RECORD_LOST:
1301 if (tool->lost == perf_event__process_lost)
1302 evlist->stats.total_lost += event->lost.lost;
1303 return tool->lost(tool, event, sample, machine);
1304 case PERF_RECORD_LOST_SAMPLES:
1305 if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
1306 evlist->stats.total_dropped_samples += event->lost_samples.lost;
1307 else if (tool->lost_samples == perf_event__process_lost_samples)
1308 evlist->stats.total_lost_samples += event->lost_samples.lost;
1309 return tool->lost_samples(tool, event, sample, machine);
1310 case PERF_RECORD_READ:
1311 dump_read(evsel, event);
1312 return tool->read(tool, event, sample, evsel, machine);
1313 case PERF_RECORD_THROTTLE:
1314 return tool->throttle(tool, event, sample, machine);
1315 case PERF_RECORD_UNTHROTTLE:
1316 return tool->unthrottle(tool, event, sample, machine);
1317 case PERF_RECORD_AUX:
1318 if (tool->aux == perf_event__process_aux) {
1319 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1320 evlist->stats.total_aux_lost += 1;
1321 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1322 evlist->stats.total_aux_partial += 1;
1323 if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
1324 evlist->stats.total_aux_collision += 1;
1325 }
1326 return tool->aux(tool, event, sample, machine);
1327 case PERF_RECORD_ITRACE_START:
1328 return tool->itrace_start(tool, event, sample, machine);
1329 case PERF_RECORD_SWITCH:
1330 case PERF_RECORD_SWITCH_CPU_WIDE:
1331 return tool->context_switch(tool, event, sample, machine);
1332 case PERF_RECORD_KSYMBOL:
1333 return tool->ksymbol(tool, event, sample, machine);
1334 case PERF_RECORD_BPF_EVENT:
1335 return tool->bpf(tool, event, sample, machine);
1336 case PERF_RECORD_TEXT_POKE:
1337 return tool->text_poke(tool, event, sample, machine);
1338 case PERF_RECORD_AUX_OUTPUT_HW_ID:
1339 return tool->aux_output_hw_id(tool, event, sample, machine);
1340 default:
1341 ++evlist->stats.nr_unknown_events;
1342 return -1;
1343 }
1344}
1345
1346static int perf_session__deliver_event(struct perf_session *session,
1347 union perf_event *event,
1348 const struct perf_tool *tool,
1349 u64 file_offset,
1350 const char *file_path)
1351{
1352 struct perf_sample sample;
1353 int ret = evlist__parse_sample(session->evlist, event, &sample);
1354
1355 if (ret) {
1356 pr_err("Can't parse sample, err = %d\n", ret);
1357 return ret;
1358 }
1359
1360 ret = auxtrace__process_event(session, event, &sample, tool);
1361 if (ret < 0)
1362 return ret;
1363 if (ret > 0)
1364 return 0;
1365
1366 ret = machines__deliver_event(&session->machines, session->evlist,
1367 event, &sample, tool, file_offset, file_path);
1368
1369 if (dump_trace && sample.aux_sample.size)
1370 auxtrace__dump_auxtrace_sample(session, &sample);
1371
1372 return ret;
1373}
1374
1375static s64 perf_session__process_user_event(struct perf_session *session,
1376 union perf_event *event,
1377 u64 file_offset,
1378 const char *file_path)
1379{
1380 struct ordered_events *oe = &session->ordered_events;
1381 const struct perf_tool *tool = session->tool;
1382 struct perf_sample sample = { .time = 0, };
1383 int fd = perf_data__fd(session->data);
1384 int err;
1385
1386 if (event->header.type != PERF_RECORD_COMPRESSED || perf_tool__compressed_is_stub(tool))
1387 dump_event(session->evlist, event, file_offset, &sample, file_path);
1388
1389 /* These events are processed right away */
1390 switch (event->header.type) {
1391 case PERF_RECORD_HEADER_ATTR:
1392 err = tool->attr(tool, event, &session->evlist);
1393 if (err == 0) {
1394 perf_session__set_id_hdr_size(session);
1395 perf_session__set_comm_exec(session);
1396 }
1397 return err;
1398 case PERF_RECORD_EVENT_UPDATE:
1399 return tool->event_update(tool, event, &session->evlist);
1400 case PERF_RECORD_HEADER_EVENT_TYPE:
1401		/*
1402		 * Deprecated, but we need to handle it for the sake
1403		 * of old data files created in pipe mode.
1404		 */
1405 return 0;
1406 case PERF_RECORD_HEADER_TRACING_DATA:
1407		/*
1408		 * Set up for reading amidst the mmap, but only when
1409		 * we are in 'file' mode; the 'pipe' fd is already in
1410		 * the proper place.
1411		 */
1412 if (!perf_data__is_pipe(session->data))
1413 lseek(fd, file_offset, SEEK_SET);
1414 return tool->tracing_data(session, event);
1415 case PERF_RECORD_HEADER_BUILD_ID:
1416 return tool->build_id(session, event);
1417 case PERF_RECORD_FINISHED_ROUND:
1418 return tool->finished_round(tool, event, oe);
1419 case PERF_RECORD_ID_INDEX:
1420 return tool->id_index(session, event);
1421 case PERF_RECORD_AUXTRACE_INFO:
1422 return tool->auxtrace_info(session, event);
1423 case PERF_RECORD_AUXTRACE:
1424		/*
1425		 * Set up for reading amidst the mmap, but only when
1426		 * we are in 'file' mode; the 'pipe' fd is already in
1427		 * the proper place.
1428		 */
1429 if (!perf_data__is_pipe(session->data))
1430 lseek(fd, file_offset + event->header.size, SEEK_SET);
1431 return tool->auxtrace(session, event);
1432 case PERF_RECORD_AUXTRACE_ERROR:
1433 perf_session__auxtrace_error_inc(session, event);
1434 return tool->auxtrace_error(session, event);
1435 case PERF_RECORD_THREAD_MAP:
1436 return tool->thread_map(session, event);
1437 case PERF_RECORD_CPU_MAP:
1438 return tool->cpu_map(session, event);
1439 case PERF_RECORD_STAT_CONFIG:
1440 return tool->stat_config(session, event);
1441 case PERF_RECORD_STAT:
1442 return tool->stat(session, event);
1443 case PERF_RECORD_STAT_ROUND:
1444 return tool->stat_round(session, event);
1445 case PERF_RECORD_TIME_CONV:
1446 session->time_conv = event->time_conv;
1447 return tool->time_conv(session, event);
1448 case PERF_RECORD_HEADER_FEATURE:
1449 return tool->feature(session, event);
1450 case PERF_RECORD_COMPRESSED:
1451 err = tool->compressed(session, event, file_offset, file_path);
1452 if (err)
1453 dump_event(session->evlist, event, file_offset, &sample, file_path);
1454 return err;
1455 case PERF_RECORD_FINISHED_INIT:
1456 return tool->finished_init(session, event);
1457 default:
1458 return -EINVAL;
1459 }
1460}
1461
1462int perf_session__deliver_synth_event(struct perf_session *session,
1463 union perf_event *event,
1464 struct perf_sample *sample)
1465{
1466 struct evlist *evlist = session->evlist;
1467 const struct perf_tool *tool = session->tool;
1468
1469 events_stats__inc(&evlist->stats, event->header.type);
1470
1471 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1472 return perf_session__process_user_event(session, event, 0, NULL);
1473
1474 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1475}
1476
1477int perf_session__deliver_synth_attr_event(struct perf_session *session,
1478 const struct perf_event_attr *attr,
1479 u64 id)
1480{
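	/*
	 * Build a PERF_RECORD_HEADER_ATTR event on the stack: the attr
	 * record is followed by a single sample id, and the enclosing
	 * union lets it be passed around as a generic perf_event.
	 */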
1481 union {
1482 struct {
1483 struct perf_record_header_attr attr;
1484 u64 ids[1];
1485 } attr_id;
1486 union perf_event ev;
1487 } ev = {
1488 .attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR,
1489 .attr_id.attr.header.size = sizeof(ev.attr_id),
1490 .attr_id.ids[0] = id,
1491 };
1492
1493 if (attr->size != sizeof(ev.attr_id.attr.attr)) {
1494 pr_debug("Unexpected perf_event_attr size\n");
1495 return -EINVAL;
1496 }
1497 ev.attr_id.attr.attr = *attr;
1498 return perf_session__deliver_synth_event(session, &ev.ev, NULL);
1499}
1500
1501static void event_swap(union perf_event *event, bool sample_id_all)
1502{
1503 perf_event__swap_op swap;
1504
1505 swap = perf_event__swap_ops[event->header.type];
1506 if (swap)
1507 swap(event, sample_id_all);
1508}
1509
1510int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1511 void *buf, size_t buf_sz,
1512 union perf_event **event_ptr,
1513 struct perf_sample *sample)
1514{
1515 union perf_event *event;
1516 size_t hdr_sz, rest;
1517 int fd;
1518
1519 if (session->one_mmap && !session->header.needs_swap) {
1520 event = file_offset - session->one_mmap_offset +
1521 session->one_mmap_addr;
1522 goto out_parse_sample;
1523 }
1524
1525 if (perf_data__is_pipe(session->data))
1526 return -1;
1527
1528 fd = perf_data__fd(session->data);
1529 hdr_sz = sizeof(struct perf_event_header);
1530
1531 if (buf_sz < hdr_sz)
1532 return -1;
1533
1534 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1535 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1536 return -1;
1537
1538 event = (union perf_event *)buf;
1539
1540 if (session->header.needs_swap)
1541 perf_event_header__bswap(&event->header);
1542
1543 if (event->header.size < hdr_sz || event->header.size > buf_sz)
1544 return -1;
1545
1546 buf += hdr_sz;
1547 rest = event->header.size - hdr_sz;
1548
1549 if (readn(fd, buf, rest) != (ssize_t)rest)
1550 return -1;
1551
1552 if (session->header.needs_swap)
1553 event_swap(event, evlist__sample_id_all(session->evlist));
1554
1555out_parse_sample:
1556
1557 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1558 evlist__parse_sample(session->evlist, event, sample))
1559 return -1;
1560
1561 *event_ptr = event;
1562
1563 return 0;
1564}
1565
1566int perf_session__peek_events(struct perf_session *session, u64 offset,
1567 u64 size, peek_events_cb_t cb, void *data)
1568{
1569 u64 max_offset = offset + size;
1570 char buf[PERF_SAMPLE_MAX_SIZE];
1571 union perf_event *event;
1572 int err;
1573
1574 do {
1575 err = perf_session__peek_event(session, offset, buf,
1576 PERF_SAMPLE_MAX_SIZE, &event,
1577 NULL);
1578 if (err)
1579 return err;
1580
1581 err = cb(session, event, offset, data);
1582 if (err)
1583 return err;
1584
1585 offset += event->header.size;
1586 if (event->header.type == PERF_RECORD_AUXTRACE)
1587 offset += event->auxtrace.size;
1588
1589 } while (offset < max_offset);
1590
1591 return err;
1592}
1593
1594static s64 perf_session__process_event(struct perf_session *session,
1595 union perf_event *event, u64 file_offset,
1596 const char *file_path)
1597{
1598 struct evlist *evlist = session->evlist;
1599 const struct perf_tool *tool = session->tool;
1600 int ret;
1601
1602 if (session->header.needs_swap)
1603 event_swap(event, evlist__sample_id_all(evlist));
1604
1605 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1606 return -EINVAL;
1607
1608 events_stats__inc(&evlist->stats, event->header.type);
1609
1610 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1611 return perf_session__process_user_event(session, event, file_offset, file_path);
1612
1613 if (tool->ordered_events) {
1614 u64 timestamp = -1ULL;
1615
1616		ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1617 if (ret && ret != -1)
1618 return ret;
1619
1620 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1621 if (ret != -ETIME)
1622 return ret;
1623 }
1624
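	/*
	 * -ETIME means the event has no valid timestamp and cannot be
	 * ordered, so fall through and deliver it directly.
	 */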
1625 return perf_session__deliver_event(session, event, tool, file_offset, file_path);
1626}
1627
1628void perf_event_header__bswap(struct perf_event_header *hdr)
1629{
1630 hdr->type = bswap_32(hdr->type);
1631 hdr->misc = bswap_16(hdr->misc);
1632 hdr->size = bswap_16(hdr->size);
1633}
1634
1635struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1636{
1637 return machine__findnew_thread(&session->machines.host, -1, pid);
1638}
1639
1640int perf_session__register_idle_thread(struct perf_session *session)
1641{
1642 struct thread *thread = machine__idle_thread(&session->machines.host);
1643
1644 /* machine__idle_thread() got the thread, so put it */
1645 thread__put(thread);
1646 return thread ? 0 : -1;
1647}
1648
1649static void
1650perf_session__warn_order(const struct perf_session *session)
1651{
1652 const struct ordered_events *oe = &session->ordered_events;
1653 struct evsel *evsel;
1654 bool should_warn = true;
1655
1656 evlist__for_each_entry(session->evlist, evsel) {
1657 if (evsel->core.attr.write_backward)
1658 should_warn = false;
1659 }
1660
1661 if (!should_warn)
1662 return;
1663 if (oe->nr_unordered_events != 0)
1664 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1665}
1666
1667static void perf_session__warn_about_errors(const struct perf_session *session)
1668{
1669 const struct events_stats *stats = &session->evlist->stats;
1670
1671 if (session->tool->lost == perf_event__process_lost &&
1672 stats->nr_events[PERF_RECORD_LOST] != 0) {
1673 ui__warning("Processed %d events and lost %d chunks!\n\n"
1674 "Check IO/CPU overload!\n\n",
1675 stats->nr_events[0],
1676 stats->nr_events[PERF_RECORD_LOST]);
1677 }
1678
1679 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1680 double drop_rate;
1681
1682 drop_rate = (double)stats->total_lost_samples /
1683 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1684 if (drop_rate > 0.05) {
1685 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1686 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1687 drop_rate * 100.0);
1688 }
1689 }
1690
1691 if (session->tool->aux == perf_event__process_aux &&
1692 stats->total_aux_lost != 0) {
1693 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1694 stats->total_aux_lost,
1695 stats->nr_events[PERF_RECORD_AUX]);
1696 }
1697
1698 if (session->tool->aux == perf_event__process_aux &&
1699 stats->total_aux_partial != 0) {
1700 bool vmm_exclusive = false;
1701
1702 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1703 &vmm_exclusive);
1704
1705 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1706 "Are you running a KVM guest in the background?%s\n\n",
1707 stats->total_aux_partial,
1708 stats->nr_events[PERF_RECORD_AUX],
1709 vmm_exclusive ?
1710 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1711 "will reduce the gaps to only guest's timeslices." :
1712 "");
1713 }
1714
1715 if (session->tool->aux == perf_event__process_aux &&
1716 stats->total_aux_collision != 0) {
1717 ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
1718 stats->total_aux_collision,
1719 stats->nr_events[PERF_RECORD_AUX]);
1720 }
1721
1722 if (stats->nr_unknown_events != 0) {
1723 ui__warning("Found %u unknown events!\n\n"
1724 "Is this an older tool processing a perf.data "
1725 "file generated by a more recent tool?\n\n"
1726 "If that is not the case, consider "
1727 "reporting to linux-kernel@vger.kernel.org.\n\n",
1728 stats->nr_unknown_events);
1729 }
1730
1731 if (stats->nr_unknown_id != 0) {
1732 ui__warning("%u samples with id not present in the header\n",
1733 stats->nr_unknown_id);
1734 }
1735
1736 if (stats->nr_invalid_chains != 0) {
1737 ui__warning("Found invalid callchains!\n\n"
1738 "%u out of %u events were discarded for this reason.\n\n"
1739 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1740 stats->nr_invalid_chains,
1741 stats->nr_events[PERF_RECORD_SAMPLE]);
1742 }
1743
1744 if (stats->nr_unprocessable_samples != 0) {
1745 ui__warning("%u unprocessable samples recorded.\n"
1746 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1747 stats->nr_unprocessable_samples);
1748 }
1749
1750 perf_session__warn_order(session);
1751
1752 events_stats__auxtrace_error_warn(stats);
1753
1754 if (stats->nr_proc_map_timeout != 0) {
1755		ui__warning("%d map information files for pre-existing threads were\n"
1756			    "not processed; if there are samples for their addresses,\n"
1757			    "they will not be resolved. You may find out which threads\n"
1758			    "these are by running with -v and redirecting the output\n"
1759			    "to a file.\n"
1760			    "Is the time limit to process the proc map too short?\n"
1761			    "Increase it with --proc-map-timeout.\n",
1762 stats->nr_proc_map_timeout);
1763 }
1764}
1765
1766static int perf_session__flush_thread_stack(struct thread *thread,
1767 void *p __maybe_unused)
1768{
1769 return thread_stack__flush(thread);
1770}
1771
1772static int perf_session__flush_thread_stacks(struct perf_session *session)
1773{
1774 return machines__for_each_thread(&session->machines,
1775 perf_session__flush_thread_stack,
1776 NULL);
1777}
1778
1779volatile sig_atomic_t session_done;
1780
1781static int __perf_session__process_decomp_events(struct perf_session *session);
1782
1783static int __perf_session__process_pipe_events(struct perf_session *session)
1784{
1785 struct ordered_events *oe = &session->ordered_events;
1786 const struct perf_tool *tool = session->tool;
1787 struct ui_progress prog;
1788 union perf_event *event;
1789 uint32_t size, cur_size = 0;
1790 void *buf = NULL;
1791 s64 skip = 0;
1792 u64 head;
1793 ssize_t err;
1794 void *p;
1795 bool update_prog = false;
1796
1797	/*
1798	 * If the input is a file saving pipe data (via redirection), it has
1799	 * a file name other than "-", so we can get the total size and show
1800	 * the progress.
1801	 */
1802 if (strcmp(session->data->path, "-") && session->data->file.size) {
1803 ui_progress__init_size(&prog, session->data->file.size,
1804 "Processing events...");
1805 update_prog = true;
1806 }
1807
1808 head = 0;
1809 cur_size = sizeof(union perf_event);
1810
1811 buf = malloc(cur_size);
1812 if (!buf)
1813 return -errno;
1814 ordered_events__set_copy_on_queue(oe, true);
1815more:
1816 event = buf;
1817 err = perf_data__read(session->data, event,
1818 sizeof(struct perf_event_header));
1819 if (err <= 0) {
1820 if (err == 0)
1821 goto done;
1822
1823 pr_err("failed to read event header\n");
1824 goto out_err;
1825 }
1826
1827 if (session->header.needs_swap)
1828 perf_event_header__bswap(&event->header);
1829
1830 size = event->header.size;
1831 if (size < sizeof(struct perf_event_header)) {
1832 pr_err("bad event header size\n");
1833 goto out_err;
1834 }
1835
1836 if (size > cur_size) {
1837 void *new = realloc(buf, size);
1838 if (!new) {
1839 pr_err("failed to allocate memory to read event\n");
1840 goto out_err;
1841 }
1842 buf = new;
1843 cur_size = size;
1844 event = buf;
1845 }
1846 p = event;
1847 p += sizeof(struct perf_event_header);
1848
1849 if (size - sizeof(struct perf_event_header)) {
1850 err = perf_data__read(session->data, p,
1851 size - sizeof(struct perf_event_header));
1852 if (err <= 0) {
1853 if (err == 0) {
1854 pr_err("unexpected end of event stream\n");
1855 goto done;
1856 }
1857
1858 pr_err("failed to read event data\n");
1859 goto out_err;
1860 }
1861 }
1862
1863 if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
1864 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1865 head, event->header.size, event->header.type);
1866 err = -EINVAL;
1867 goto out_err;
1868 }
1869
1870 head += size;
1871
1872 if (skip > 0)
1873 head += skip;
1874
1875 err = __perf_session__process_decomp_events(session);
1876 if (err)
1877 goto out_err;
1878
1879 if (update_prog)
1880 ui_progress__update(&prog, size);
1881
1882 if (!session_done())
1883 goto more;
1884done:
1885 /* do the final flush for ordered samples */
1886 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1887 if (err)
1888 goto out_err;
1889 err = auxtrace__flush_events(session, tool);
1890 if (err)
1891 goto out_err;
1892 err = perf_session__flush_thread_stacks(session);
1893out_err:
1894 free(buf);
1895 if (update_prog)
1896 ui_progress__finish();
1897 if (!tool->no_warn)
1898 perf_session__warn_about_errors(session);
1899 ordered_events__free(&session->ordered_events);
1900 auxtrace__free_events(session);
1901 return err;
1902}
1903
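/*
 * Return the event at 'head' when it lies entirely within the mapped
 * buffer, NULL when the caller should remap so that the event fits, or
 * 'error' when the event could never fit in a single mmap window.
 */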
1904static union perf_event *
1905prefetch_event(char *buf, u64 head, size_t mmap_size,
1906 bool needs_swap, union perf_event *error)
1907{
1908 union perf_event *event;
1909 u16 event_size;
1910
1911 /*
1912 * Ensure we have enough space remaining to read
1913 * the size of the event in the headers.
1914 */
1915 if (head + sizeof(event->header) > mmap_size)
1916 return NULL;
1917
1918 event = (union perf_event *)(buf + head);
1919 if (needs_swap)
1920 perf_event_header__bswap(&event->header);
1921
1922 event_size = event->header.size;
1923 if (head + event_size <= mmap_size)
1924 return event;
1925
1926 /* We're not fetching the event so swap back again */
1927 if (needs_swap)
1928 perf_event_header__bswap(&event->header);
1929
1930 /* Check if the event fits into the next mmapped buf. */
1931 if (event_size <= mmap_size - head % page_size) {
1932 /* Remap buf and fetch again. */
1933 return NULL;
1934 }
1935
1936 /* Invalid input. Event size should never exceed mmap_size. */
1937 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
1938 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
1939
1940 return error;
1941}
1942
1943static union perf_event *
1944fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
1945{
1946 return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
1947}
1948
1949static union perf_event *
1950fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
1951{
1952 return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
1953}
1954
1955static int __perf_session__process_decomp_events(struct perf_session *session)
1956{
1957 s64 skip;
1958 u64 size;
1959 struct decomp *decomp = session->active_decomp->decomp_last;
1960
1961 if (!decomp)
1962 return 0;
1963
1964 while (decomp->head < decomp->size && !session_done()) {
1965 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
1966 session->header.needs_swap);
1967
1968 if (!event)
1969 break;
1970
1971 size = event->header.size;
1972
1973 if (size < sizeof(struct perf_event_header) ||
1974 (skip = perf_session__process_event(session, event, decomp->file_pos,
1975 decomp->file_path)) < 0) {
1976 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1977 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1978 return -EINVAL;
1979 }
1980
1981 if (skip)
1982 size += skip;
1983
1984 decomp->head += size;
1985 }
1986
1987 return 0;
1988}
1989
1990/*
1991 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1992 * slices. On 32bit we use 32MB.
1993 */
1994#if BITS_PER_LONG == 64
1995#define MMAP_SIZE ULLONG_MAX
1996#define NUM_MMAPS 1
1997#else
1998#define MMAP_SIZE (32 * 1024 * 1024ULL)
1999#define NUM_MMAPS 128
2000#endif
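/*
 * NUM_MMAPS must be a power of two: reader__mmap() advances mmap_idx
 * with an "& (ARRAY_SIZE(mmaps) - 1)" mask.
 */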
2001
2002struct reader;
2003
2004typedef s64 (*reader_cb_t)(struct perf_session *session,
2005 union perf_event *event,
2006 u64 file_offset,
2007 const char *file_path);
2008
2009struct reader {
2010 int fd;
2011 const char *path;
2012 u64 data_size;
2013 u64 data_offset;
2014 reader_cb_t process;
2015 bool in_place_update;
2016 char *mmaps[NUM_MMAPS];
2017 size_t mmap_size;
2018 int mmap_idx;
2019 char *mmap_cur;
2020 u64 file_pos;
2021 u64 file_offset;
2022 u64 head;
2023 u64 size;
2024 bool done;
2025 struct zstd_data zstd_data;
2026 struct decomp_data decomp_data;
2027};
2028
2029static int
2030reader__init(struct reader *rd, bool *one_mmap)
2031{
2032 u64 data_size = rd->data_size;
2033 char **mmaps = rd->mmaps;
2034
2035 rd->head = rd->data_offset;
2036 data_size += rd->data_offset;
2037
2038 rd->mmap_size = MMAP_SIZE;
2039 if (rd->mmap_size > data_size) {
2040 rd->mmap_size = data_size;
2041 if (one_mmap)
2042 *one_mmap = true;
2043 }
2044
2045 memset(mmaps, 0, sizeof(rd->mmaps));
2046
2047 if (zstd_init(&rd->zstd_data, 0))
2048 return -1;
2049 rd->decomp_data.zstd_decomp = &rd->zstd_data;
2050
2051 return 0;
2052}
2053
2054static void
2055reader__release_decomp(struct reader *rd)
2056{
2057 perf_decomp__release_events(rd->decomp_data.decomp);
2058 zstd_fini(&rd->zstd_data);
2059}
2060
2061static int
2062reader__mmap(struct reader *rd, struct perf_session *session)
2063{
2064 int mmap_prot, mmap_flags;
2065 char *buf, **mmaps = rd->mmaps;
2066 u64 page_offset;
2067
2068 mmap_prot = PROT_READ;
2069 mmap_flags = MAP_SHARED;
2070
2071 if (rd->in_place_update) {
2072 mmap_prot |= PROT_WRITE;
2073 } else if (session->header.needs_swap) {
2074 mmap_prot |= PROT_WRITE;
2075 mmap_flags = MAP_PRIVATE;
2076 }
2077
2078 if (mmaps[rd->mmap_idx]) {
2079 munmap(mmaps[rd->mmap_idx], rd->mmap_size);
2080 mmaps[rd->mmap_idx] = NULL;
2081 }
2082
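	/*
	 * mmap() needs a page-aligned file offset, so round the offset down
	 * to a page boundary and keep the remainder in 'head'.
	 */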
2083 page_offset = page_size * (rd->head / page_size);
2084 rd->file_offset += page_offset;
2085 rd->head -= page_offset;
2086
2087 buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
2088 rd->file_offset);
2089 if (buf == MAP_FAILED) {
2090 pr_err("failed to mmap file\n");
2091 return -errno;
2092 }
2093 mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
2094 rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
2095 rd->file_pos = rd->file_offset + rd->head;
2096 if (session->one_mmap) {
2097 session->one_mmap_addr = buf;
2098 session->one_mmap_offset = rd->file_offset;
2099 }
2100
2101 return 0;
2102}
2103
2104enum {
2105 READER_OK,
2106 READER_NODATA,
2107};
2108
2109static int
2110reader__read_event(struct reader *rd, struct perf_session *session,
2111 struct ui_progress *prog)
2112{
2113 u64 size;
2114 int err = READER_OK;
2115 union perf_event *event;
2116 s64 skip;
2117
2118 event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
2119 session->header.needs_swap);
2120 if (IS_ERR(event))
2121 return PTR_ERR(event);
2122
2123 if (!event)
2124 return READER_NODATA;
2125
2126 size = event->header.size;
2127
2128 skip = -EINVAL;
2129
2130 if (size < sizeof(struct perf_event_header) ||
2131 (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
2132 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2133 rd->file_offset + rd->head, event->header.size,
2134 event->header.type, strerror(-skip));
2135 err = skip;
2136 goto out;
2137 }
2138
2139 if (skip)
2140 size += skip;
2141
2142 rd->size += size;
2143 rd->head += size;
2144 rd->file_pos += size;
2145
2146 err = __perf_session__process_decomp_events(session);
2147 if (err)
2148 goto out;
2149
2150 ui_progress__update(prog, size);
2151
2152out:
2153 return err;
2154}
2155
2156static inline bool
2157reader__eof(struct reader *rd)
2158{
2159 return (rd->file_pos >= rd->data_size + rd->data_offset);
2160}
2161
2162static int
2163reader__process_events(struct reader *rd, struct perf_session *session,
2164 struct ui_progress *prog)
2165{
2166 int err;
2167
2168 err = reader__init(rd, &session->one_mmap);
2169 if (err)
2170 goto out;
2171
2172 session->active_decomp = &rd->decomp_data;
2173
2174remap:
2175 err = reader__mmap(rd, session);
2176 if (err)
2177 goto out;
2178
2179more:
2180 err = reader__read_event(rd, session, prog);
2181 if (err < 0)
2182 goto out;
2183 else if (err == READER_NODATA)
2184 goto remap;
2185
2186 if (session_done())
2187 goto out;
2188
2189 if (!reader__eof(rd))
2190 goto more;
2191
2192out:
2193 session->active_decomp = &session->decomp_data;
2194 return err;
2195}
2196
2197static s64 process_simple(struct perf_session *session,
2198 union perf_event *event,
2199 u64 file_offset,
2200 const char *file_path)
2201{
2202 return perf_session__process_event(session, event, file_offset, file_path);
2203}
2204
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd = perf_data__fd(session->data),
		.path = session->data->file.path,
		.data_size = session->header.data_size,
		.data_offset = session->header.data_offset,
		.process = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching the perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	reader__release_decomp(&rd);
	session->one_mmap = false;
	return err;
}

2251/*
2252 * Processing 2 MB of data from each reader in sequence,
2253 * because that's the way the ordered events sorting works
2254 * most efficiently.
2255 */
2256#define READER_MAX_SIZE (2 * 1024 * 1024)
2257
2258/*
2259 * This function reads, merge and process directory data.
2260 * It assumens the version 1 of directory data, where each
2261 * data file holds per-cpu data, already sorted by kernel.
2262 */
static int __perf_session__process_dir_events(struct perf_session *session)
{
	struct perf_data *data = session->data;
	const struct perf_tool *tool = session->tool;
	int i, ret, readers, nr_readers;
	struct ui_progress prog;
	u64 total_size = perf_data__size(session->data);
	struct reader *rd;

	ui_progress__init_size(&prog, total_size, "Processing events...");

	nr_readers = 1;
	for (i = 0; i < data->dir.nr; i++) {
		if (data->dir.files[i].size)
			nr_readers++;
	}

	rd = zalloc(nr_readers * sizeof(struct reader));
	if (!rd)
		return -ENOMEM;

	rd[0] = (struct reader) {
		.fd = perf_data__fd(session->data),
		.path = session->data->file.path,
		.data_size = session->header.data_size,
		.data_offset = session->header.data_offset,
		.process = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	ret = reader__init(&rd[0], NULL);
	if (ret)
		goto out_err;
	ret = reader__mmap(&rd[0], session);
	if (ret)
		goto out_err;
	readers = 1;

	for (i = 0; i < data->dir.nr; i++) {
		if (!data->dir.files[i].size)
			continue;
		rd[readers] = (struct reader) {
			.fd = data->dir.files[i].fd,
			.path = data->dir.files[i].path,
			.data_size = data->dir.files[i].size,
			.data_offset = 0,
			.process = process_simple,
			.in_place_update = session->data->in_place_update,
		};
		ret = reader__init(&rd[readers], NULL);
		if (ret)
			goto out_err;
		ret = reader__mmap(&rd[readers], session);
		if (ret)
			goto out_err;
		readers++;
	}

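	/*
	 * Round-robin over the readers, consuming up to READER_MAX_SIZE
	 * bytes from each before moving on, so ordered_events sees roughly
	 * time-aligned batches from every per-cpu file.
	 */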
	i = 0;
	while (readers) {
		if (session_done())
			break;

		if (rd[i].done) {
			i = (i + 1) % nr_readers;
			continue;
		}
		if (reader__eof(&rd[i])) {
			rd[i].done = true;
			readers--;
			continue;
		}

		session->active_decomp = &rd[i].decomp_data;
		ret = reader__read_event(&rd[i], session, &prog);
		if (ret < 0) {
			goto out_err;
		} else if (ret == READER_NODATA) {
			ret = reader__mmap(&rd[i], session);
			if (ret)
				goto out_err;
		}

		if (rd[i].size >= READER_MAX_SIZE) {
			rd[i].size = 0;
			i = (i + 1) % nr_readers;
		}
	}

	ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
	if (ret)
		goto out_err;

	ret = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();

	if (!tool->no_warn)
		perf_session__warn_about_errors(session);

	/*
	 * We may be switching the perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);

	session->one_mmap = false;

	session->active_decomp = &session->decomp_data;
	for (i = 0; i < nr_readers; i++)
		reader__release_decomp(&rd[i]);
	zfree(&rd);

	return ret;
}

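/*
 * Top-level event processing: pick the pipe, directory (per-cpu files) or
 * single-file path depending on how the perf.data was recorded.
 */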
int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	if (perf_data__is_dir(session->data) && session->data->dir.nr)
		return __perf_session__process_dir_events(session);

	return __perf_session__process_events(session);
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

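/*
 * Remember a kallsyms reference symbol and its address on the kernel map;
 * it is used later to compute the kernel relocation offset. Anything from
 * the first ']' onwards is stripped from the symbol name.
 */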
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

void perf_session__dump_kmaps(struct perf_session *session)
{
	int save_verbose = verbose;

	fflush(stdout);
	fprintf(stderr, "Kernel and module maps:\n");
	verbose = 0; /* Suppress verbose to print a summary only */
	maps__fprintf(machine__kernel_maps(&session->machines.host), stderr);
	verbose = save_verbose;
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

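/*
 * Turn a user-supplied cpu list (-C) into a bitmap, after checking that
 * the recorded events carry PERF_SAMPLE_CPU and that every requested CPU
 * fits under MAX_NR_CPUS.
 */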
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
	struct perf_cpu cpu;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	perf_cpu_map__for_each_cpu(cpu, i, map) {
		if (cpu.cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu.cpu);
			goto out_delete_map;
		}

		__set_bit(cpu.cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

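/*
 * Create (or find) the machine for a guest machine_pid, inherit the
 * host's single_address_space setting, give it an idle thread and point
 * it at the guest kallsyms copy shipped with the perf.data.
 */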
static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
{
	struct machine *machine = machines__findnew(&session->machines, machine_pid);
	struct thread *thread;

	if (!machine)
		return -ENOMEM;

	machine->single_address_space = session->machines.host.single_address_space;

	thread = machine__idle_thread(machine);
	if (!thread)
		return -ENOMEM;
	thread__put(thread);

	machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);

	return 0;
}

static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
				       pid_t tid, int guest_cpu)
{
	struct machine *machine = &session->machines.host;
	struct thread *thread = machine__findnew_thread(machine, pid, tid);

	if (!thread)
		return -ENOMEM;
	thread__set_guest_cpu(thread, guest_cpu);
	thread__put(thread);

	return 0;
}

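/*
 * An ID index event carries an array of struct id_index_entry and, in its
 * extended form, a second array of struct id_index_entry_2 holding guest
 * machine_pid/vcpu information. The extended layout is detected by
 * checking whether the payload is large enough to hold both arrays.
 */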
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t sz = ie->header.size - sizeof(*ie);
	size_t i, nr, max_nr;
	size_t e1_sz = sizeof(struct id_index_entry);
	size_t e2_sz = sizeof(struct id_index_entry_2);
	size_t etot_sz = e1_sz + e2_sz;
	struct id_index_entry_2 *e2;
	pid_t last_pid = 0;

	max_nr = sz / e1_sz;
	nr = ie->nr;
	if (nr > max_nr) {
		printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
		return -EINVAL;
	}

	if (sz >= nr * etot_sz) {
		max_nr = sz / etot_sz;
		if (nr > max_nr) {
			printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
			return -EINVAL;
		}
		e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
	} else {
		e2 = NULL;
	}

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;
		int ret;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, " idx: %"PRI_lu64, e->idx);
			fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, " tid: %"PRI_ld64, e->tid);
			if (e2) {
				fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
				fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
			} else {
				fprintf(stdout, "\n");
			}
		}

		sid = evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;

		sid->idx = e->idx;
		sid->cpu.cpu = e->cpu;
		sid->tid = e->tid;

		if (!e2)
			continue;

		sid->machine_pid = e2->machine_pid;
		sid->vcpu.cpu = e2->vcpu;

		if (!sid->machine_pid)
			continue;

		if (sid->machine_pid != last_pid) {
			ret = perf_session__register_guest(session, sid->machine_pid);
			if (ret)
				return ret;
			last_pid = sid->machine_pid;
			perf_guest = true;
		}

		ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
		if (ret)
			return ret;
	}
	return 0;
}

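/*
 * Mark every DSO in the host and all guest machines as hit, so that
 * build-id processing treats every DSO as used.
 */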
int perf_session__dsos_hit_all(struct perf_session *session)
{
	struct rb_node *nd;
	int err;

	err = machine__hit_all_dsos(&session->machines.host);
	if (err)
		return err;

	for (nd = rb_first_cached(&session->machines.guests); nd;
	     nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		err = machine__hit_all_dsos(pos);
		if (err)
			return err;
	}

	return 0;
}