1// SPDX-License-Identifier: GPL-2.0
2/*
3 * builtin-report.c
4 *
5 * Builtin report command: Analyze the perf.data input file,
6 * look up and read DSOs and symbol information and display
7 * a histogram of results, along various sorting keys.
8 */
9#include "builtin.h"
10
11#include "util/config.h"
12
13#include "util/annotate.h"
14#include "util/color.h"
15#include "util/dso.h"
16#include <linux/list.h>
17#include <linux/rbtree.h>
18#include <linux/err.h>
19#include <linux/zalloc.h>
20#include "util/map.h"
21#include "util/symbol.h"
22#include "util/map_symbol.h"
23#include "util/mem-events.h"
24#include "util/branch.h"
25#include "util/callchain.h"
26#include "util/values.h"
27
28#include "perf.h"
29#include "util/debug.h"
30#include "util/evlist.h"
31#include "util/evsel.h"
32#include "util/evswitch.h"
33#include "util/header.h"
34#include "util/session.h"
35#include "util/srcline.h"
36#include "util/tool.h"
37
38#include <subcmd/parse-options.h>
39#include <subcmd/exec-cmd.h>
40#include "util/parse-events.h"
41
42#include "util/thread.h"
43#include "util/sort.h"
44#include "util/hist.h"
45#include "util/data.h"
46#include "arch/common.h"
47#include "util/time-utils.h"
48#include "util/auxtrace.h"
49#include "util/units.h"
51#include "util/util.h" // perf_tip()
52#include "ui/ui.h"
53#include "ui/progress.h"
54
55#include <dlfcn.h>
56#include <errno.h>
57#include <inttypes.h>
58#include <regex.h>
59#include <linux/ctype.h>
60#include <signal.h>
61#include <linux/bitmap.h>
62#include <linux/string.h>
63#include <linux/stringify.h>
64#include <linux/time64.h>
65#include <sys/types.h>
66#include <sys/stat.h>
67#include <unistd.h>
68#include <linux/mman.h>
69
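/*
 * State for one 'perf report' invocation: the tool callbacks, the mode
 * flags and filters parsed from the command line, and per-run bookkeeping
 * such as the branch type statistics and the entry count.
 */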
70struct report {
71 struct perf_tool tool;
72 struct perf_session *session;
73 struct evswitch evswitch;
74 bool use_tui, use_gtk, use_stdio;
75 bool show_full_info;
76 bool show_threads;
77 bool inverted_callchain;
78 bool mem_mode;
79 bool stats_mode;
80 bool tasks_mode;
81 bool mmaps_mode;
82 bool header;
83 bool header_only;
84 bool nonany_branch_mode;
85 bool group_set;
86 int max_stack;
87 struct perf_read_values show_threads_values;
88 struct annotation_options annotation_opts;
89 const char *pretty_printing_style;
90 const char *cpu_list;
91 const char *symbol_filter_str;
92 const char *time_str;
93 struct perf_time_interval *ptime_range;
94 int range_size;
95 int range_num;
96 float min_percent;
97 u64 nr_entries;
98 u64 queue_size;
99 int socket_filter;
100 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
101 struct branch_type_stat brtype_stat;
102 bool symbol_ipc;
103};
104
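/*
 * Handle the 'report.*' keys from the perf config file: event grouping,
 * percent limit, accumulation of children, the ordered-events queue size
 * and the default sort order.  Unknown keys are silently ignored.
 */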
105static int report__config(const char *var, const char *value, void *cb)
106{
107 struct report *rep = cb;
108
109 if (!strcmp(var, "report.group")) {
110 symbol_conf.event_group = perf_config_bool(var, value);
111 return 0;
112 }
113 if (!strcmp(var, "report.percent-limit")) {
114 double pcnt = strtof(value, NULL);
115
116 rep->min_percent = pcnt;
117 callchain_param.min_percent = pcnt;
118 return 0;
119 }
120 if (!strcmp(var, "report.children")) {
121 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
122 return 0;
123 }
124 if (!strcmp(var, "report.queue-size"))
125 return perf_config_u64(&rep->queue_size, var, value);
126
	if (!strcmp(var, "report.sort_order")) {
		default_sort_order = strdup(value);
		if (!default_sort_order) {
			pr_err("Not enough memory for report.sort_order\n");
			return -1;
		}
		return 0;
	}
131
132 return 0;
133}
134
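/*
 * Called for each hist entry added from a sample.  It bumps the per-address
 * (and, for branch/mem mode, per-endpoint) sample counters that feed
 * annotation and the IPC sort keys, so it returns early when neither
 * annotation nor IPC sorting is in use.
 */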
135static int hist_iter__report_callback(struct hist_entry_iter *iter,
136 struct addr_location *al, bool single,
137 void *arg)
138{
139 int err = 0;
140 struct report *rep = arg;
141 struct hist_entry *he = iter->he;
142 struct evsel *evsel = iter->evsel;
143 struct perf_sample *sample = iter->sample;
144 struct mem_info *mi;
145 struct branch_info *bi;
146
147 if (!ui__has_annotation() && !rep->symbol_ipc)
148 return 0;
149
150 if (sort__mode == SORT_MODE__BRANCH) {
151 bi = he->branch_info;
152 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
153 if (err)
154 goto out;
155
156 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
157
158 } else if (rep->mem_mode) {
159 mi = he->mem_info;
160 err = addr_map_symbol__inc_samples(&mi->daddr, sample, evsel);
161 if (err)
162 goto out;
163
164 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
165
166 } else if (symbol_conf.cumulate_callchain) {
167 if (single)
168 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
169 } else {
170 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
171 }
172
173out:
174 return err;
175}
176
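/*
 * Branch mode variant of the callback above: it accumulates the per
 * branch type statistics and bumps the sample counters of both branch
 * endpoints for annotation and the IPC sort keys.
 */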
177static int hist_iter__branch_callback(struct hist_entry_iter *iter,
178 struct addr_location *al __maybe_unused,
179 bool single __maybe_unused,
180 void *arg)
181{
182 struct hist_entry *he = iter->he;
183 struct report *rep = arg;
184 struct branch_info *bi;
185 struct perf_sample *sample = iter->sample;
186 struct evsel *evsel = iter->evsel;
187 int err;
188
	bi = he->branch_info;
	branch_type_count(&rep->brtype_stat, &bi->flags,
			  bi->from.addr, bi->to.addr);

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
	if (err)
		goto out;

	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
201
202out:
203 return err;
204}
205
206static void setup_forced_leader(struct report *report,
207 struct evlist *evlist)
208{
209 if (report->group_set)
210 perf_evlist__force_leader(evlist);
211}
212
213static int process_feature_event(struct perf_session *session,
214 union perf_event *event)
215{
216 struct report *rep = container_of(session->tool, struct report, tool);
217
218 if (event->feat.feat_id < HEADER_LAST_FEATURE)
219 return perf_event__process_feature(session, event);
220
221 if (event->feat.feat_id != HEADER_LAST_FEATURE) {
222 pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
223 event->feat.feat_id);
224 return -1;
225 }
226
227 /*
228 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
229 * means all features are received, now we can force the
230 * group if needed.
231 */
232 setup_forced_leader(rep, session->evlist);
233 return 0;
234}
235
236static int process_sample_event(struct perf_tool *tool,
237 union perf_event *event,
238 struct perf_sample *sample,
239 struct evsel *evsel,
240 struct machine *machine)
241{
242 struct report *rep = container_of(tool, struct report, tool);
243 struct addr_location al;
244 struct hist_entry_iter iter = {
245 .evsel = evsel,
246 .sample = sample,
247 .hide_unresolved = symbol_conf.hide_unresolved,
248 .add_entry_cb = hist_iter__report_callback,
249 };
250 int ret = 0;
251
252 if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
253 sample->time)) {
254 return 0;
255 }
256
257 if (evswitch__discard(&rep->evswitch, evsel))
258 return 0;
259
260 if (machine__resolve(machine, &al, sample) < 0) {
261 pr_debug("problem processing %d event, skipping it.\n",
262 event->header.type);
263 return -1;
264 }
265
266 if (symbol_conf.hide_unresolved && al.sym == NULL)
267 goto out_put;
268
269 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
270 goto out_put;
271
272 if (sort__mode == SORT_MODE__BRANCH) {
273 /*
274 * A non-synthesized event might not have a branch stack if
275 * branch stacks have been synthesized (using itrace options).
276 */
277 if (!sample->branch_stack)
278 goto out_put;
279
280 iter.add_entry_cb = hist_iter__branch_callback;
281 iter.ops = &hist_iter_branch;
282 } else if (rep->mem_mode) {
283 iter.ops = &hist_iter_mem;
284 } else if (symbol_conf.cumulate_callchain) {
285 iter.ops = &hist_iter_cumulative;
286 } else {
287 iter.ops = &hist_iter_normal;
288 }
289
290 if (al.map != NULL)
291 al.map->dso->hit = 1;
292
293 if (ui__has_annotation() || rep->symbol_ipc) {
294 hist__account_cycles(sample->branch_stack, &al, sample,
295 rep->nonany_branch_mode);
296 }
297
298 ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
299 if (ret < 0)
300 pr_debug("problem adding hist entry, skipping event\n");
301out_put:
302 addr_location__put(&al);
303 return ret;
304}
305
306static int process_read_event(struct perf_tool *tool,
307 union perf_event *event,
308 struct perf_sample *sample __maybe_unused,
309 struct evsel *evsel,
310 struct machine *machine __maybe_unused)
311{
312 struct report *rep = container_of(tool, struct report, tool);
313
314 if (rep->show_threads) {
315 const char *name = perf_evsel__name(evsel);
316 int err = perf_read_values_add_value(&rep->show_threads_values,
317 event->read.pid, event->read.tid,
318 evsel->idx,
319 name,
320 event->read.value);
321
322 if (err)
323 return err;
324 }
325
326 return 0;
327}
328
329/* For pipe mode, sample_type is not currently set */
330static int report__setup_sample_type(struct report *rep)
331{
332 struct perf_session *session = rep->session;
333 u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
334 bool is_pipe = perf_data__is_pipe(session->data);
335
336 if (session->itrace_synth_opts->callchain ||
337 (!is_pipe &&
338 perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
339 !session->itrace_synth_opts->set))
340 sample_type |= PERF_SAMPLE_CALLCHAIN;
341
342 if (session->itrace_synth_opts->last_branch)
343 sample_type |= PERF_SAMPLE_BRANCH_STACK;
344
345 if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
346 if (perf_hpp_list.parent) {
347 ui__error("Selected --sort parent, but no "
348 "callchain data. Did you call "
349 "'perf record' without -g?\n");
350 return -EINVAL;
351 }
352 if (symbol_conf.use_callchain &&
353 !symbol_conf.show_branchflag_count) {
354 ui__error("Selected -g or --branch-history.\n"
355 "But no callchain or branch data.\n"
356 "Did you call 'perf record' without -g or -b?\n");
357 return -1;
358 }
359 } else if (!callchain_param.enabled &&
360 callchain_param.mode != CHAIN_NONE &&
361 !symbol_conf.use_callchain) {
362 symbol_conf.use_callchain = true;
363 if (callchain_register_param(&callchain_param) < 0) {
364 ui__error("Can't register callchain params.\n");
365 return -EINVAL;
366 }
367 }
368
369 if (symbol_conf.cumulate_callchain) {
370 /* Silently ignore if callchain is missing */
371 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
372 symbol_conf.cumulate_callchain = false;
373 perf_hpp__cancel_cumulate();
374 }
375 }
376
377 if (sort__mode == SORT_MODE__BRANCH) {
378 if (!is_pipe &&
379 !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
380 ui__error("Selected -b but no branch data. "
381 "Did you call perf record without -b?\n");
382 return -1;
383 }
384 }
385
386 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
387 if ((sample_type & PERF_SAMPLE_REGS_USER) &&
388 (sample_type & PERF_SAMPLE_STACK_USER)) {
389 callchain_param.record_mode = CALLCHAIN_DWARF;
390 dwarf_callchain_users = true;
391 } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
392 callchain_param.record_mode = CALLCHAIN_LBR;
393 else
394 callchain_param.record_mode = CALLCHAIN_FP;
395 }
396
397 /* ??? handle more cases than just ANY? */
398 if (!(perf_evlist__combined_branch_type(session->evlist) &
399 PERF_SAMPLE_BRANCH_ANY))
400 rep->nonany_branch_mode = true;
401
402 return 0;
403}
404
405static void sig_handler(int sig __maybe_unused)
406{
407 session_done = 1;
408}
409
410static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
411 const char *evname, FILE *fp)
412{
413 size_t ret;
414 char unit;
415 unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
416 u64 nr_events = hists->stats.total_period;
417 struct evsel *evsel = hists_to_evsel(hists);
418 char buf[512];
419 size_t size = sizeof(buf);
	int socket_id = hists->socket_filter;
421
422 if (quiet)
423 return 0;
424
425 if (symbol_conf.filter_relative) {
426 nr_samples = hists->stats.nr_non_filtered_samples;
427 nr_events = hists->stats.total_non_filtered_period;
428 }
429
430 if (perf_evsel__is_group_event(evsel)) {
431 struct evsel *pos;
432
433 perf_evsel__group_desc(evsel, buf, size);
434 evname = buf;
435
436 for_each_group_member(pos, evsel) {
437 const struct hists *pos_hists = evsel__hists(pos);
438
439 if (symbol_conf.filter_relative) {
440 nr_samples += pos_hists->stats.nr_non_filtered_samples;
441 nr_events += pos_hists->stats.total_non_filtered_period;
442 } else {
443 nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
444 nr_events += pos_hists->stats.total_period;
445 }
446 }
447 }
448
449 nr_samples = convert_unit(nr_samples, &unit);
450 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
451 if (evname != NULL) {
452 ret += fprintf(fp, " of event%s '%s'",
453 evsel->core.nr_members > 1 ? "s" : "", evname);
454 }
455
456 if (rep->time_str)
457 ret += fprintf(fp, " (time slices: %s)", rep->time_str);
458
	if (symbol_conf.show_ref_callgraph && evname &&
	    strstr(evname, "call-graph=no")) {
461 ret += fprintf(fp, ", show reference callgraph");
462 }
463
464 if (rep->mem_mode) {
465 ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
466 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
467 } else
468 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
469
	if (socket_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socket_id);
472
473 return ret + fprintf(fp, "\n#\n");
474}
475
476static int perf_evlist__tty_browse_hists(struct evlist *evlist,
477 struct report *rep,
478 const char *help)
479{
480 struct evsel *pos;
481
482 if (!quiet) {
483 fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
484 evlist->stats.total_lost_samples);
485 }
486
487 evlist__for_each_entry(evlist, pos) {
488 struct hists *hists = evsel__hists(pos);
489 const char *evname = perf_evsel__name(pos);
490
491 if (symbol_conf.event_group &&
492 !perf_evsel__is_group_leader(pos))
493 continue;
494
495 hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
496 hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
497 !(symbol_conf.use_callchain ||
498 symbol_conf.show_branchflag_count));
499 fprintf(stdout, "\n\n");
500 }
501
502 if (!quiet)
503 fprintf(stdout, "#\n# (%s)\n#\n", help);
504
505 if (rep->show_threads) {
506 bool style = !strcmp(rep->pretty_printing_style, "raw");
507 perf_read_values_display(stdout, &rep->show_threads_values,
508 style);
509 perf_read_values_destroy(&rep->show_threads_values);
510 }
511
512 if (sort__mode == SORT_MODE__BRANCH)
513 branch_type_stat_display(stdout, &rep->brtype_stat);
514
515 return 0;
516}
517
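/*
 * Warn the user when kernel samples cannot be resolved, typically because
 * kptr_restrict hid kernel addresses from /proc/kallsyms at record time.
 */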
518static void report__warn_kptr_restrict(const struct report *rep)
519{
520 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
521 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
522
523 if (perf_evlist__exclude_kernel(rep->session->evlist))
524 return;
525
526 if (kernel_map == NULL ||
527 (kernel_map->dso->hit &&
528 (kernel_kmap->ref_reloc_sym == NULL ||
529 kernel_kmap->ref_reloc_sym->addr == 0))) {
530 const char *desc =
531 "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
532 "can't be resolved.";
533
534 if (kernel_map && map__has_symbols(kernel_map)) {
535 desc = "If some relocation was applied (e.g. "
536 "kexec) symbols may be misresolved.";
537 }
538
539 ui__warning(
540"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
541"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
542"Samples in kernel modules can't be resolved as well.\n\n",
543 desc);
544 }
545}
546
547static int report__gtk_browse_hists(struct report *rep, const char *help)
548{
549 int (*hist_browser)(struct evlist *evlist, const char *help,
550 struct hist_browser_timer *timer, float min_pcnt);
551
552 hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");
553
554 if (hist_browser == NULL) {
555 ui__error("GTK browser not found!\n");
556 return -1;
557 }
558
559 return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
560}
561
562static int report__browse_hists(struct report *rep)
563{
564 int ret;
565 struct perf_session *session = rep->session;
566 struct evlist *evlist = session->evlist;
567 const char *help = perf_tip(system_path(TIPDIR));
568
569 if (help == NULL) {
570 /* fallback for people who don't install perf ;-) */
571 help = perf_tip(DOCDIR);
572 if (help == NULL)
573 help = "Cannot load tips.txt file, please install perf!";
574 }
575
576 switch (use_browser) {
577 case 1:
578 ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
579 rep->min_percent,
580 &session->header.env,
581 true, &rep->annotation_opts);
582 /*
583 * Usually "ret" is the last pressed key, and we only
584 * care if the key notifies us to switch data file.
585 */
586 if (ret != K_SWITCH_INPUT_DATA)
587 ret = 0;
588 break;
589 case 2:
590 ret = report__gtk_browse_hists(rep, help);
591 break;
592 default:
593 ret = perf_evlist__tty_browse_hists(evlist, rep, help);
594 break;
595 }
596
597 return ret;
598}
599
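/*
 * Merge (collapse) the hist entries of every evsel and, when event groups
 * are shown, match and link the group members' hists to their leader's.
 */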
600static int report__collapse_hists(struct report *rep)
601{
602 struct ui_progress prog;
603 struct evsel *pos;
604 int ret = 0;
605
606 ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
607
608 evlist__for_each_entry(rep->session->evlist, pos) {
609 struct hists *hists = evsel__hists(pos);
610
611 if (pos->idx == 0)
612 hists->symbol_filter_str = rep->symbol_filter_str;
613
614 hists->socket_filter = rep->socket_filter;
615
616 ret = hists__collapse_resort(hists, &prog);
617 if (ret < 0)
618 break;
619
620 /* Non-group events are considered as leader */
621 if (symbol_conf.event_group &&
622 !perf_evsel__is_group_leader(pos)) {
623 struct hists *leader_hists = evsel__hists(pos->leader);
624
625 hists__match(leader_hists, hists);
626 hists__link(leader_hists, hists);
627 }
628 }
629
630 ui_progress__finish();
631 return ret;
632}
633
634static int hists__resort_cb(struct hist_entry *he, void *arg)
635{
636 struct report *rep = arg;
637 struct symbol *sym = he->ms.sym;
638
639 if (rep->symbol_ipc && sym && !sym->annotate2) {
640 struct evsel *evsel = hists_to_evsel(he->hists);
641
642 symbol__annotate2(sym, he->ms.map, evsel,
643 &annotation__default_options, NULL);
644 }
645
646 return 0;
647}
648
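/*
 * Sort the hist entries for output.  The resort callback also annotates
 * symbols lazily when the IPC sort key needs cycles/instructions data.
 */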
649static void report__output_resort(struct report *rep)
650{
651 struct ui_progress prog;
652 struct evsel *pos;
653
654 ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
655
656 evlist__for_each_entry(rep->session->evlist, pos) {
657 perf_evsel__output_resort_cb(pos, &prog,
658 hists__resort_cb, rep);
659 }
660
661 ui_progress__finish();
662}
663
664static void stats_setup(struct report *rep)
665{
666 memset(&rep->tool, 0, sizeof(rep->tool));
667 rep->tool.no_warn = true;
668}
669
670static int stats_print(struct report *rep)
671{
672 struct perf_session *session = rep->session;
673
674 perf_session__fprintf_nr_events(session, stdout);
675 return 0;
676}
677
678static void tasks_setup(struct report *rep)
679{
680 memset(&rep->tool, 0, sizeof(rep->tool));
681 rep->tool.ordered_events = true;
682 if (rep->mmaps_mode) {
683 rep->tool.mmap = perf_event__process_mmap;
684 rep->tool.mmap2 = perf_event__process_mmap2;
685 }
686 rep->tool.comm = perf_event__process_comm;
687 rep->tool.exit = perf_event__process_exit;
688 rep->tool.fork = perf_event__process_fork;
689 rep->tool.no_warn = true;
690}
691
692struct task {
693 struct thread *thread;
694 struct list_head list;
695 struct list_head children;
696};
697
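/*
 * Link a task into its parent's children list and recurse towards the top
 * of the process tree.  Returns the topmost ancestor when it still needs
 * to be added to the global list, NULL if the chain was already processed,
 * or an ERR_PTR() when a parent thread cannot be found.
 */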
698static struct task *tasks_list(struct task *task, struct machine *machine)
699{
700 struct thread *parent_thread, *thread = task->thread;
701 struct task *parent_task;
702
703 /* Already listed. */
704 if (!list_empty(&task->list))
705 return NULL;
706
707 /* Last one in the chain. */
708 if (thread->ppid == -1)
709 return task;
710
711 parent_thread = machine__find_thread(machine, -1, thread->ppid);
712 if (!parent_thread)
713 return ERR_PTR(-ENOENT);
714
715 parent_task = thread__priv(parent_thread);
716 list_add_tail(&task->list, &parent_task->children);
717 return tasks_list(parent_task, machine);
718}
719
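/* Print the memory maps of a task in a /proc/<pid>/maps like format. */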
720static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
721{
722 size_t printed = 0;
723 struct rb_node *nd;
724
725 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
726 struct map *map = rb_entry(nd, struct map, rb_node);
727
728 printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
729 indent, "", map->start, map->end,
730 map->prot & PROT_READ ? 'r' : '-',
731 map->prot & PROT_WRITE ? 'w' : '-',
732 map->prot & PROT_EXEC ? 'x' : '-',
733 map->flags & MAP_SHARED ? 's' : 'p',
734 map->pgoff,
735 map->ino, map->dso->name);
736 }
737
738 return printed;
739}
740
741static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
742{
743 return maps__fprintf_task(&mg->maps, indent, fp);
744}
745
746static void task__print_level(struct task *task, FILE *fp, int level)
747{
748 struct thread *thread = task->thread;
749 struct task *child;
750 int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
751 thread->pid_, thread->tid, thread->ppid,
752 level, "");
753
754 fprintf(fp, "%s\n", thread__comm_str(thread));
755
756 map_groups__fprintf_task(thread->mg, comm_indent, fp);
757
758 if (!list_empty(&task->children)) {
759 list_for_each_entry(child, &task->children, list)
760 task__print_level(child, fp, level + 1);
761 }
762}
763
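/*
 * Implement 'perf report --tasks': build the parent/child forest from all
 * known threads and print one line per task, indented by level, followed
 * by its memory maps when --mmaps recorded them.
 */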
764static int tasks_print(struct report *rep, FILE *fp)
765{
766 struct perf_session *session = rep->session;
767 struct machine *machine = &session->machines.host;
768 struct task *tasks, *task;
769 unsigned int nr = 0, itask = 0, i;
770 struct rb_node *nd;
771 LIST_HEAD(list);
772
773 /*
774 * No locking needed while accessing machine->threads,
	 * because --tasks is a single-threaded command.
776 */
777
778 /* Count all the threads. */
779 for (i = 0; i < THREADS__TABLE_SIZE; i++)
780 nr += machine->threads[i].nr;
781
782 tasks = malloc(sizeof(*tasks) * nr);
783 if (!tasks)
784 return -ENOMEM;
785
786 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
787 struct threads *threads = &machine->threads[i];
788
789 for (nd = rb_first_cached(&threads->entries); nd;
790 nd = rb_next(nd)) {
791 task = tasks + itask++;
792
793 task->thread = rb_entry(nd, struct thread, rb_node);
794 INIT_LIST_HEAD(&task->children);
795 INIT_LIST_HEAD(&task->list);
796 thread__set_priv(task->thread, task);
797 }
798 }
799
800 /*
	 * Iterate every task up to its topmost unprocessed parent and
	 * link each one into its parent's children list. Tasks with no
	 * parent are added to 'list'.
804 */
805 for (itask = 0; itask < nr; itask++) {
806 task = tasks + itask;
807
808 if (!list_empty(&task->list))
809 continue;
810
811 task = tasks_list(task, machine);
812 if (IS_ERR(task)) {
813 pr_err("Error: failed to process tasks\n");
814 free(tasks);
815 return PTR_ERR(task);
816 }
817
818 if (task)
819 list_add_tail(&task->list, &list);
820 }
821
822 fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
823
824 list_for_each_entry(task, &list, list)
825 task__print_level(task, fp, 0);
826
827 free(tasks);
828 return 0;
829}
830
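/*
 * Main report driver: process all events from the perf.data file, handle
 * the --stats/--tasks shortcuts, warn about kptr_restrict, collapse and
 * resort the histograms and finally hand them to the chosen browser
 * (stdio, TUI or GTK).
 */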
831static int __cmd_report(struct report *rep)
832{
833 int ret;
834 struct perf_session *session = rep->session;
835 struct evsel *pos;
836 struct perf_data *data = session->data;
837
838 signal(SIGINT, sig_handler);
839
840 if (rep->cpu_list) {
841 ret = perf_session__cpu_bitmap(session, rep->cpu_list,
842 rep->cpu_bitmap);
843 if (ret) {
844 ui__error("failed to set cpu bitmap\n");
845 return ret;
846 }
847 session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
848 }
849
850 if (rep->show_threads) {
851 ret = perf_read_values_init(&rep->show_threads_values);
852 if (ret)
853 return ret;
854 }
855
856 ret = report__setup_sample_type(rep);
857 if (ret) {
858 /* report__setup_sample_type() already showed error message */
859 return ret;
860 }
861
862 if (rep->stats_mode)
863 stats_setup(rep);
864
865 if (rep->tasks_mode)
866 tasks_setup(rep);
867
868 ret = perf_session__process_events(session);
869 if (ret) {
870 ui__error("failed to process sample\n");
871 return ret;
872 }
873
874 if (rep->stats_mode)
875 return stats_print(rep);
876
877 if (rep->tasks_mode)
878 return tasks_print(rep, stdout);
879
880 report__warn_kptr_restrict(rep);
881
882 evlist__for_each_entry(session->evlist, pos)
883 rep->nr_entries += evsel__hists(pos)->nr_entries;
884
885 if (use_browser == 0) {
886 if (verbose > 3)
887 perf_session__fprintf(session, stdout);
888
889 if (verbose > 2)
890 perf_session__fprintf_dsos(session, stdout);
891
892 if (dump_trace) {
893 perf_session__fprintf_nr_events(session, stdout);
894 perf_evlist__fprintf_nr_events(session->evlist, stdout);
895 return 0;
896 }
897 }
898
899 ret = report__collapse_hists(rep);
900 if (ret) {
901 ui__error("failed to process hist entry\n");
902 return ret;
903 }
904
905 if (session_done())
906 return 0;
907
908 /*
	 * Recalculate the number of entries after collapsing since it
	 * might have changed during the collapse phase.
911 */
912 rep->nr_entries = 0;
913 evlist__for_each_entry(session->evlist, pos)
914 rep->nr_entries += evsel__hists(pos)->nr_entries;
915
916 if (rep->nr_entries == 0) {
917 ui__error("The %s data has no samples!\n", data->path);
918 return 0;
919 }
920
921 report__output_resort(rep);
922
923 return report__browse_hists(rep);
924}
925
926static int
927report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
928{
929 struct callchain_param *callchain = opt->value;
930
931 callchain->enabled = !unset;
932 /*
933 * --no-call-graph
934 */
935 if (unset) {
936 symbol_conf.use_callchain = false;
937 callchain->mode = CHAIN_NONE;
938 return 0;
939 }
940
941 return parse_callchain_report_opt(arg);
942}
943
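/*
 * Parse the --time-quantum argument: a non-zero number with an optional
 * s, ms, us or ns suffix, converted to nanoseconds (no suffix means ns).
 */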
944static int
945parse_time_quantum(const struct option *opt, const char *arg,
946 int unset __maybe_unused)
947{
948 unsigned long *time_q = opt->value;
949 char *end;
950
951 *time_q = strtoul(arg, &end, 0);
952 if (end == arg)
953 goto parse_err;
954 if (*time_q == 0) {
955 pr_err("time quantum cannot be 0");
956 return -1;
957 }
958 end = skip_spaces(end);
959 if (*end == 0)
960 return 0;
961 if (!strcmp(end, "s")) {
962 *time_q *= NSEC_PER_SEC;
963 return 0;
964 }
965 if (!strcmp(end, "ms")) {
966 *time_q *= NSEC_PER_MSEC;
967 return 0;
968 }
969 if (!strcmp(end, "us")) {
970 *time_q *= NSEC_PER_USEC;
971 return 0;
972 }
973 if (!strcmp(end, "ns"))
974 return 0;
975parse_err:
976 pr_err("Cannot parse time quantum `%s'\n", arg);
977 return -1;
978}
979
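/*
 * Compile the --ignore-callees regex; callees of matching functions are
 * ignored when building call graphs.
 */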
980int
981report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
982 const char *arg, int unset __maybe_unused)
983{
984 if (arg) {
985 int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
986 if (err) {
987 char buf[BUFSIZ];
988 regerror(err, &ignore_callees_regex, buf, sizeof(buf));
989 pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
990 return -1;
991 }
992 have_ignore_callees = 1;
993 }
994
995 return 0;
996}
997
998static int
999parse_branch_mode(const struct option *opt,
1000 const char *str __maybe_unused, int unset)
1001{
1002 int *branch_mode = opt->value;
1003
1004 *branch_mode = !unset;
1005 return 0;
1006}
1007
1008static int
1009parse_percent_limit(const struct option *opt, const char *str,
1010 int unset __maybe_unused)
1011{
1012 struct report *rep = opt->value;
1013 double pcnt = strtof(str, NULL);
1014
1015 rep->min_percent = pcnt;
1016 callchain_param.min_percent = pcnt;
1017 return 0;
1018}
1019
1020int cmd_report(int argc, const char **argv)
1021{
1022 struct perf_session *session;
1023 struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
1024 struct stat st;
1025 bool has_br_stack = false;
1026 int branch_mode = -1;
1027 bool branch_call_mode = false;
1028#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
1029 static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
1030 CALLCHAIN_REPORT_HELP
1031 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
1032 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
1033 const char * const report_usage[] = {
1034 "perf report [<options>]",
1035 NULL
1036 };
1037 struct report report = {
1038 .tool = {
1039 .sample = process_sample_event,
1040 .mmap = perf_event__process_mmap,
1041 .mmap2 = perf_event__process_mmap2,
1042 .comm = perf_event__process_comm,
1043 .namespaces = perf_event__process_namespaces,
1044 .exit = perf_event__process_exit,
1045 .fork = perf_event__process_fork,
1046 .lost = perf_event__process_lost,
1047 .read = process_read_event,
1048 .attr = perf_event__process_attr,
1049 .tracing_data = perf_event__process_tracing_data,
1050 .build_id = perf_event__process_build_id,
1051 .id_index = perf_event__process_id_index,
1052 .auxtrace_info = perf_event__process_auxtrace_info,
1053 .auxtrace = perf_event__process_auxtrace,
1054 .event_update = perf_event__process_event_update,
1055 .feature = process_feature_event,
1056 .ordered_events = true,
1057 .ordering_requires_timestamps = true,
1058 },
1059 .max_stack = PERF_MAX_STACK_DEPTH,
1060 .pretty_printing_style = "normal",
1061 .socket_filter = -1,
1062 .annotation_opts = annotation__default_options,
1063 };
1064 const struct option options[] = {
1065 OPT_STRING('i', "input", &input_name, "file",
1066 "input file name"),
1067 OPT_INCR('v', "verbose", &verbose,
1068 "be more verbose (show symbol address, etc)"),
1069 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
1070 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1071 "dump raw trace in ASCII"),
1072 OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
1073 OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
1074 OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
1075 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1076 "file", "vmlinux pathname"),
1077 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1078 "don't load vmlinux even if found"),
1079 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1080 "file", "kallsyms pathname"),
1081 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
1082 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
1083 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1084 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1085 "Show a column with the number of samples"),
1086 OPT_BOOLEAN('T', "threads", &report.show_threads,
1087 "Show per-thread event counters"),
1088 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
1089 "pretty printing style key: normal raw"),
1090 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
1091 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
1092 OPT_BOOLEAN(0, "stdio", &report.use_stdio,
1093 "Use the stdio interface"),
1094 OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
1095 OPT_BOOLEAN(0, "header-only", &report.header_only,
1096 "Show only data header."),
1097 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1098 sort_help("sort by key(s):")),
1099 OPT_STRING('F', "fields", &field_order, "key[,keys...]",
1100 sort_help("output field(s): overhead period sample ")),
1101 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
1102 "Show sample percentage for different cpu modes"),
1103 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
1104 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
1105 OPT_STRING('p', "parent", &parent_pattern, "regex",
1106 "regex filter to identify parent, see: '--sort parent'"),
1107 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
1108 "Only display entries with parent-match"),
1109 OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
1110 "print_type,threshold[,print_limit],order,sort_key[,branch],value",
1111 report_callchain_help, &report_parse_callchain_opt,
1112 callchain_default_opt),
1113 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1114 "Accumulate callchains of children and show total overhead as well"),
1115 OPT_INTEGER(0, "max-stack", &report.max_stack,
1116 "Set the maximum stack depth when parsing the callchain, "
1117 "anything beyond the specified depth will be ignored. "
1118 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1119 OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
1120 "alias for inverted call graph"),
1121 OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1122 "ignore callees of these functions in call graphs",
1123 report_parse_ignore_callees_opt),
1124 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1125 "only consider symbols in these dsos"),
1126 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1127 "only consider symbols in these comms"),
1128 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
1129 "only consider symbols in these pids"),
1130 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
1131 "only consider symbols in these tids"),
1132 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1133 "only consider these symbols"),
1134 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
1135 "only show symbols that (partially) match with this filter"),
1136 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1137 "width[,width...]",
1138 "don't try to adjust column width, use these fixed values"),
1139 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
1140 "separator for columns, no spaces will be added between "
		   "columns; '.' is reserved."),
1142 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
1143 "Only display entries resolved to a symbol"),
1144 OPT_CALLBACK(0, "symfs", NULL, "directory",
1145 "Look for files with symbols relative to this directory",
1146 symbol__config_symfs),
1147 OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
1148 "list of cpus to profile"),
1149 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
1150 "Display extended information about perf.data file"),
1151 OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
1152 "Interleave source code with assembly code (default)"),
1153 OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
1154 "Display raw encoding of assembly instructions (default)"),
1155 OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
1156 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1157 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1158 "Show a column with the sum of periods"),
1159 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
1160 "Show event group information together"),
1161 OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
1162 "use branch records for per branch histogram filling",
1163 parse_branch_mode),
1164 OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
1165 "add last branch records to call history"),
1166 OPT_STRING(0, "objdump", &report.annotation_opts.objdump_path, "path",
1167 "objdump binary to use for disassembly and annotations"),
1168 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
1169 "Disable symbol demangling"),
1170 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1171 "Enable kernel symbol demangling"),
1172 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
1173 OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
1174 "Number of samples to save per histogram entry for individual browsing"),
1175 OPT_CALLBACK(0, "percent-limit", &report, "percent",
1176 "Don't show entries under that percent", parse_percent_limit),
1177 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1178 "how to display percentage of filtered entries", parse_filter_percentage),
1179 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
1180 "Instruction Tracing options\n" ITRACE_HELP,
1181 itrace_parse_synth_opts),
1182 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
1183 "Show full source file name path for source lines"),
1184 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
1185 "Show callgraph from reference event"),
1186 OPT_INTEGER(0, "socket-filter", &report.socket_filter,
1187 "only show processor socket that match with this filter"),
1188 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1189 "Show raw trace event output (do not use print fmt or plugins)"),
1190 OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1191 "Show entries in a hierarchy"),
1192 OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
1193 "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
1194 stdio__config_color, "always"),
1195 OPT_STRING(0, "time", &report.time_str, "str",
1196 "Time span of interest (start,stop)"),
1197 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
1198 "Show inline function"),
1199 OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
1200 "Set percent type local/global-period/hits",
1201 annotate_parse_percent_type),
1202 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
1203 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
1204 "Set time quantum for time sort key (default 100ms)",
1205 parse_time_quantum),
1206 OPTS_EVSWITCH(&report.evswitch),
1207 OPT_END()
1208 };
1209 struct perf_data data = {
1210 .mode = PERF_DATA_MODE_READ,
1211 };
1212 int ret = hists__init();
1213 char sort_tmp[128];
1214
1215 if (ret < 0)
1216 return ret;
1217
1218 ret = perf_config(report__config, &report);
1219 if (ret)
1220 return ret;
1221
1222 argc = parse_options(argc, argv, options, report_usage, 0);
1223 if (argc) {
1224 /*
1225 * Special case: if there's an argument left then assume that
1226 * it's a symbol filter:
1227 */
1228 if (argc > 1)
1229 usage_with_options(report_usage, options);
1230
1231 report.symbol_filter_str = argv[0];
1232 }
1233
1234 if (report.mmaps_mode)
1235 report.tasks_mode = true;
1236
1237 if (quiet)
1238 perf_quiet_option();
1239
1240 if (symbol_conf.vmlinux_name &&
1241 access(symbol_conf.vmlinux_name, R_OK)) {
1242 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
1243 return -EINVAL;
1244 }
1245 if (symbol_conf.kallsyms_name &&
1246 access(symbol_conf.kallsyms_name, R_OK)) {
1247 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
1248 return -EINVAL;
1249 }
1250
1251 if (report.inverted_callchain)
1252 callchain_param.order = ORDER_CALLER;
1253 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1254 callchain_param.order = ORDER_CALLER;
1255
1256 if (itrace_synth_opts.callchain &&
1257 (int)itrace_synth_opts.callchain_sz > report.max_stack)
1258 report.max_stack = itrace_synth_opts.callchain_sz;
1259
1260 if (!input_name || !strlen(input_name)) {
1261 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1262 input_name = "-";
1263 else
1264 input_name = "perf.data";
1265 }
1266
1267 data.path = input_name;
1268 data.force = symbol_conf.force;
1269
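	/*
	 * The TUI browser can ask to switch to a different data file
	 * (K_SWITCH_INPUT_DATA); the session is then deleted and we come
	 * back here to open the new one.
	 */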
1270repeat:
1271 session = perf_session__new(&data, false, &report.tool);
1272 if (IS_ERR(session))
1273 return PTR_ERR(session);
1274
1275 ret = evswitch__init(&report.evswitch, session->evlist, stderr);
1276 if (ret)
1277 return ret;
1278
1279 if (zstd_init(&(session->zstd_data), 0) < 0)
1280 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
1281
1282 if (report.queue_size) {
1283 ordered_events__set_alloc_size(&session->ordered_events,
1284 report.queue_size);
1285 }
1286
1287 session->itrace_synth_opts = &itrace_synth_opts;
1288
1289 report.session = session;
1290
1291 has_br_stack = perf_header__has_feat(&session->header,
1292 HEADER_BRANCH_STACK);
1293 if (perf_evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
1294 has_br_stack = false;
1295
1296 setup_forced_leader(&report, session->evlist);
1297
1298 if (itrace_synth_opts.last_branch)
1299 has_br_stack = true;
1300
1301 if (has_br_stack && branch_call_mode)
1302 symbol_conf.show_branchflag_count = true;
1303
1304 memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));
1305
1306 /*
1307 * Branch mode is a tristate:
1308 * -1 means default, so decide based on the file having branch data.
1309 * 0/1 means the user chose a mode.
1310 */
1311 if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
1312 !branch_call_mode) {
1313 sort__mode = SORT_MODE__BRANCH;
1314 symbol_conf.cumulate_callchain = false;
1315 }
1316 if (branch_call_mode) {
1317 callchain_param.key = CCKEY_ADDRESS;
1318 callchain_param.branch_callstack = 1;
1319 symbol_conf.use_callchain = true;
1320 callchain_register_param(&callchain_param);
1321 if (sort_order == NULL)
1322 sort_order = "srcline,symbol,dso";
1323 }
1324
1325 if (report.mem_mode) {
1326 if (sort__mode == SORT_MODE__BRANCH) {
1327 pr_err("branch and mem mode incompatible\n");
1328 goto error;
1329 }
1330 sort__mode = SORT_MODE__MEMORY;
1331 symbol_conf.cumulate_callchain = false;
1332 }
1333
1334 if (symbol_conf.report_hierarchy) {
1335 /* disable incompatible options */
1336 symbol_conf.cumulate_callchain = false;
1337
1338 if (field_order) {
1339 pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1340 parse_options_usage(report_usage, options, "F", 1);
1341 parse_options_usage(NULL, options, "hierarchy", 0);
1342 goto error;
1343 }
1344
1345 perf_hpp_list.need_collapse = true;
1346 }
1347
1348 if (report.use_stdio)
1349 use_browser = 0;
1350 else if (report.use_tui)
1351 use_browser = 1;
1352 else if (report.use_gtk)
1353 use_browser = 2;
1354
1355 /* Force tty output for header output and per-thread stat. */
1356 if (report.header || report.header_only || report.show_threads)
1357 use_browser = 0;
1358 if (report.header || report.header_only)
1359 report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
1360 if (report.show_full_info)
1361 report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
1362 if (report.stats_mode || report.tasks_mode)
1363 use_browser = 0;
1364 if (report.stats_mode && report.tasks_mode) {
1365 pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
1366 goto error;
1367 }
1368
1369 if (strcmp(input_name, "-") != 0)
1370 setup_browser(true);
1371 else
1372 use_browser = 0;
1373
1374 if (sort_order && strstr(sort_order, "ipc")) {
1375 parse_options_usage(report_usage, options, "s", 1);
1376 goto error;
1377 }
1378
1379 if (sort_order && strstr(sort_order, "symbol")) {
1380 if (sort__mode == SORT_MODE__BRANCH) {
1381 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1382 sort_order, "ipc_lbr");
1383 report.symbol_ipc = true;
1384 } else {
1385 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1386 sort_order, "ipc_null");
1387 }
1388
1389 sort_order = sort_tmp;
1390 }
1391
1392 if (setup_sorting(session->evlist) < 0) {
1393 if (sort_order)
1394 parse_options_usage(report_usage, options, "s", 1);
1395 if (field_order)
1396 parse_options_usage(sort_order ? NULL : report_usage,
1397 options, "F", 1);
1398 goto error;
1399 }
1400
1401 if ((report.header || report.header_only) && !quiet) {
1402 perf_session__fprintf_info(session, stdout,
1403 report.show_full_info);
1404 if (report.header_only) {
1405 ret = 0;
1406 goto error;
1407 }
1408 } else if (use_browser == 0 && !quiet &&
1409 !report.stats_mode && !report.tasks_mode) {
1410 fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
1411 stdout);
1412 }
1413
1414 /*
	 * Only in the TUI browser do we do integrated annotation,
1416 * so don't allocate extra space that won't be used in the stdio
1417 * implementation.
1418 */
1419 if (ui__has_annotation() || report.symbol_ipc) {
1420 ret = symbol__annotation_init();
1421 if (ret < 0)
1422 goto error;
1423 /*
		 * For searching by name on the "Browse map details".
		 * Provide it only in verbose mode so as not to bloat
		 * struct symbol too much.
1427 */
1428 if (verbose > 0) {
1429 /*
1430 * XXX: Need to provide a less kludgy way to ask for
1431 * more space per symbol, the u32 is for the index on
1432 * the ui browser.
1433 * See symbol__browser_index.
1434 */
1435 symbol_conf.priv_size += sizeof(u32);
1436 symbol_conf.sort_by_name = true;
1437 }
1438 annotation_config__init();
1439 }
1440
1441 if (symbol__init(&session->header.env) < 0)
1442 goto error;
1443
1444 if (report.time_str) {
1445 ret = perf_time__parse_for_ranges(report.time_str, session,
1446 &report.ptime_range,
1447 &report.range_size,
1448 &report.range_num);
1449 if (ret < 0)
1450 goto error;
1451
1452 itrace_synth_opts__set_time_range(&itrace_synth_opts,
1453 report.ptime_range,
1454 report.range_num);
1455 }
1456
1457 if (session->tevent.pevent &&
1458 tep_set_function_resolver(session->tevent.pevent,
1459 machine__resolve_kernel_addr,
1460 &session->machines.host) < 0) {
1461 pr_err("%s: failed to set libtraceevent function resolver\n",
1462 __func__);
1463 return -1;
1464 }
1465
1466 sort__setup_elide(stdout);
1467
1468 ret = __cmd_report(&report);
1469 if (ret == K_SWITCH_INPUT_DATA) {
1470 perf_session__delete(session);
1471 goto repeat;
1472 } else
1473 ret = 0;
1474
1475error:
1476 if (report.ptime_range) {
1477 itrace_synth_opts__clear_time_range(&itrace_synth_opts);
1478 zfree(&report.ptime_range);
1479 }
1480 zstd_fini(&(session->zstd_data));
1481 perf_session__delete(session);
1482 return ret;
1483}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * builtin-report.c
4 *
5 * Builtin report command: Analyze the perf.data input file,
6 * look up and read DSOs and symbol information and display
7 * a histogram of results, along various sorting keys.
8 */
9#include "builtin.h"
10
11#include "util/config.h"
12
13#include "util/annotate.h"
14#include "util/color.h"
15#include "util/dso.h"
16#include <linux/list.h>
17#include <linux/rbtree.h>
18#include <linux/err.h>
19#include <linux/zalloc.h>
20#include "util/map.h"
21#include "util/symbol.h"
22#include "util/map_symbol.h"
23#include "util/mem-events.h"
24#include "util/branch.h"
25#include "util/callchain.h"
26#include "util/values.h"
27
28#include "perf.h"
29#include "util/debug.h"
30#include "util/evlist.h"
31#include "util/evsel.h"
32#include "util/evswitch.h"
33#include "util/header.h"
34#include "util/mem-info.h"
35#include "util/session.h"
36#include "util/srcline.h"
37#include "util/tool.h"
38
39#include <subcmd/parse-options.h>
40#include <subcmd/exec-cmd.h>
41#include "util/parse-events.h"
42
43#include "util/thread.h"
44#include "util/sort.h"
45#include "util/hist.h"
46#include "util/data.h"
47#include "arch/common.h"
48#include "util/time-utils.h"
49#include "util/auxtrace.h"
50#include "util/units.h"
51#include "util/util.h" // perf_tip()
52#include "ui/ui.h"
53#include "ui/progress.h"
54#include "util/block-info.h"
55
56#include <dlfcn.h>
57#include <errno.h>
58#include <inttypes.h>
59#include <regex.h>
60#include <linux/ctype.h>
61#include <signal.h>
62#include <linux/bitmap.h>
63#include <linux/list_sort.h>
64#include <linux/string.h>
65#include <linux/stringify.h>
66#include <linux/time64.h>
67#include <sys/types.h>
68#include <sys/stat.h>
69#include <unistd.h>
70#include <linux/mman.h>
71
72#ifdef HAVE_LIBTRACEEVENT
73#include <event-parse.h>
74#endif
75
76struct report {
77 struct perf_tool tool;
78 struct perf_session *session;
79 struct evswitch evswitch;
80#ifdef HAVE_SLANG_SUPPORT
81 bool use_tui;
82#endif
83#ifdef HAVE_GTK2_SUPPORT
84 bool use_gtk;
85#endif
86 bool use_stdio;
87 bool show_full_info;
88 bool show_threads;
89 bool inverted_callchain;
90 bool mem_mode;
91 bool stats_mode;
92 bool tasks_mode;
93 bool mmaps_mode;
94 bool header;
95 bool header_only;
96 bool nonany_branch_mode;
97 bool group_set;
98 bool stitch_lbr;
99 bool disable_order;
100 bool skip_empty;
101 bool data_type;
102 int max_stack;
103 struct perf_read_values show_threads_values;
104 const char *pretty_printing_style;
105 const char *cpu_list;
106 const char *symbol_filter_str;
107 const char *time_str;
108 struct perf_time_interval *ptime_range;
109 int range_size;
110 int range_num;
111 float min_percent;
112 u64 nr_entries;
113 u64 queue_size;
114 u64 total_cycles;
115 int socket_filter;
116 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
117 struct branch_type_stat brtype_stat;
118 bool symbol_ipc;
119 bool total_cycles_mode;
120 struct block_report *block_reports;
121 int nr_block_reports;
122};
123
124static int report__config(const char *var, const char *value, void *cb)
125{
126 struct report *rep = cb;
127
128 if (!strcmp(var, "report.group")) {
129 symbol_conf.event_group = perf_config_bool(var, value);
130 return 0;
131 }
132 if (!strcmp(var, "report.percent-limit")) {
133 double pcnt = strtof(value, NULL);
134
135 rep->min_percent = pcnt;
136 callchain_param.min_percent = pcnt;
137 return 0;
138 }
139 if (!strcmp(var, "report.children")) {
140 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
141 return 0;
142 }
143 if (!strcmp(var, "report.queue-size"))
144 return perf_config_u64(&rep->queue_size, var, value);
145
146 if (!strcmp(var, "report.sort_order")) {
147 default_sort_order = strdup(value);
148 if (!default_sort_order) {
149 pr_err("Not enough memory for report.sort_order\n");
150 return -1;
151 }
152 return 0;
153 }
154
155 if (!strcmp(var, "report.skip-empty")) {
156 rep->skip_empty = perf_config_bool(var, value);
157 return 0;
158 }
159
160 pr_debug("%s variable unknown, ignoring...", var);
161 return 0;
162}
163
164static int hist_iter__report_callback(struct hist_entry_iter *iter,
165 struct addr_location *al, bool single,
166 void *arg)
167{
168 int err = 0;
169 struct report *rep = arg;
170 struct hist_entry *he = iter->he;
171 struct evsel *evsel = iter->evsel;
172 struct perf_sample *sample = iter->sample;
173 struct mem_info *mi;
174 struct branch_info *bi;
175
176 if (!ui__has_annotation() && !rep->symbol_ipc)
177 return 0;
178
179 if (sort__mode == SORT_MODE__BRANCH) {
180 bi = he->branch_info;
181 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
182 if (err)
183 goto out;
184
185 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
186
187 } else if (rep->mem_mode) {
188 mi = he->mem_info;
189 err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
190 if (err)
191 goto out;
192
193 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
194
195 } else if (symbol_conf.cumulate_callchain) {
196 if (single)
197 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
198 } else {
199 err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
200 }
201
202out:
203 return err;
204}
205
206static int hist_iter__branch_callback(struct hist_entry_iter *iter,
207 struct addr_location *al __maybe_unused,
208 bool single __maybe_unused,
209 void *arg)
210{
211 struct hist_entry *he = iter->he;
212 struct report *rep = arg;
213 struct branch_info *bi = he->branch_info;
214 struct perf_sample *sample = iter->sample;
215 struct evsel *evsel = iter->evsel;
216 int err;
217
218 branch_type_count(&rep->brtype_stat, &bi->flags,
219 bi->from.addr, bi->to.addr);
220
221 if (!ui__has_annotation() && !rep->symbol_ipc)
222 return 0;
223
224 err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
225 if (err)
226 goto out;
227
228 err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);
229
230out:
231 return err;
232}
233
234static void setup_forced_leader(struct report *report,
235 struct evlist *evlist)
236{
237 if (report->group_set)
238 evlist__force_leader(evlist);
239}
240
241static int process_feature_event(struct perf_session *session,
242 union perf_event *event)
243{
244 struct report *rep = container_of(session->tool, struct report, tool);
245
246 if (event->feat.feat_id < HEADER_LAST_FEATURE)
247 return perf_event__process_feature(session, event);
248
249 if (event->feat.feat_id != HEADER_LAST_FEATURE) {
250 pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
251 event->feat.feat_id);
252 return -1;
253 } else if (rep->header_only) {
254 session_done = 1;
255 }
256
257 /*
258 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
259 * means all features are received, now we can force the
260 * group if needed.
261 */
262 setup_forced_leader(rep, session->evlist);
263 return 0;
264}
265
266static int process_sample_event(const struct perf_tool *tool,
267 union perf_event *event,
268 struct perf_sample *sample,
269 struct evsel *evsel,
270 struct machine *machine)
271{
272 struct report *rep = container_of(tool, struct report, tool);
273 struct addr_location al;
274 struct hist_entry_iter iter = {
275 .evsel = evsel,
276 .sample = sample,
277 .hide_unresolved = symbol_conf.hide_unresolved,
278 .add_entry_cb = hist_iter__report_callback,
279 };
280 int ret = 0;
281
282 if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
283 sample->time)) {
284 return 0;
285 }
286
287 if (evswitch__discard(&rep->evswitch, evsel))
288 return 0;
289
290 addr_location__init(&al);
291 if (machine__resolve(machine, &al, sample) < 0) {
292 pr_debug("problem processing %d event, skipping it.\n",
293 event->header.type);
294 ret = -1;
295 goto out_put;
296 }
297
298 if (rep->stitch_lbr)
299 thread__set_lbr_stitch_enable(al.thread, true);
300
301 if (symbol_conf.hide_unresolved && al.sym == NULL)
302 goto out_put;
303
304 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
305 goto out_put;
306
307 if (sort__mode == SORT_MODE__BRANCH) {
308 /*
309 * A non-synthesized event might not have a branch stack if
310 * branch stacks have been synthesized (using itrace options).
311 */
312 if (!sample->branch_stack)
313 goto out_put;
314
315 iter.add_entry_cb = hist_iter__branch_callback;
316 iter.ops = &hist_iter_branch;
317 } else if (rep->mem_mode) {
318 iter.ops = &hist_iter_mem;
319 } else if (symbol_conf.cumulate_callchain) {
320 iter.ops = &hist_iter_cumulative;
321 } else {
322 iter.ops = &hist_iter_normal;
323 }
324
325 if (al.map != NULL)
326 dso__set_hit(map__dso(al.map));
327
328 if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
329 hist__account_cycles(sample->branch_stack, &al, sample,
330 rep->nonany_branch_mode,
331 &rep->total_cycles, evsel);
332 }
333
334 ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
335 if (ret < 0)
336 pr_debug("problem adding hist entry, skipping event\n");
337out_put:
338 addr_location__exit(&al);
339 return ret;
340}
341
342static int process_read_event(const struct perf_tool *tool,
343 union perf_event *event,
344 struct perf_sample *sample __maybe_unused,
345 struct evsel *evsel,
346 struct machine *machine __maybe_unused)
347{
348 struct report *rep = container_of(tool, struct report, tool);
349
350 if (rep->show_threads) {
351 const char *name = evsel__name(evsel);
352 int err = perf_read_values_add_value(&rep->show_threads_values,
353 event->read.pid, event->read.tid,
354 evsel->core.idx,
355 name,
356 event->read.value);
357
358 if (err)
359 return err;
360 }
361
362 return 0;
363}
364
365/* For pipe mode, sample_type is not currently set */
366static int report__setup_sample_type(struct report *rep)
367{
368 struct perf_session *session = rep->session;
369 u64 sample_type = evlist__combined_sample_type(session->evlist);
370 bool is_pipe = perf_data__is_pipe(session->data);
371 struct evsel *evsel;
372
373 if (session->itrace_synth_opts->callchain ||
374 session->itrace_synth_opts->add_callchain ||
375 (!is_pipe &&
376 perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
377 !session->itrace_synth_opts->set))
378 sample_type |= PERF_SAMPLE_CALLCHAIN;
379
380 if (session->itrace_synth_opts->last_branch ||
381 session->itrace_synth_opts->add_last_branch)
382 sample_type |= PERF_SAMPLE_BRANCH_STACK;
383
384 if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
385 if (perf_hpp_list.parent) {
386 ui__error("Selected --sort parent, but no "
387 "callchain data. Did you call "
388 "'perf record' without -g?\n");
389 return -EINVAL;
390 }
391 if (symbol_conf.use_callchain &&
392 !symbol_conf.show_branchflag_count) {
393 ui__error("Selected -g or --branch-history.\n"
394 "But no callchain or branch data.\n"
395 "Did you call 'perf record' without -g or -b?\n");
396 return -1;
397 }
398 } else if (!callchain_param.enabled &&
399 callchain_param.mode != CHAIN_NONE &&
400 !symbol_conf.use_callchain) {
401 symbol_conf.use_callchain = true;
402 if (callchain_register_param(&callchain_param) < 0) {
403 ui__error("Can't register callchain params.\n");
404 return -EINVAL;
405 }
406 }
407
408 if (symbol_conf.cumulate_callchain) {
409 /* Silently ignore if callchain is missing */
410 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
411 symbol_conf.cumulate_callchain = false;
412 perf_hpp__cancel_cumulate();
413 }
414 }
415
416 if (sort__mode == SORT_MODE__BRANCH) {
417 if (!is_pipe &&
418 !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
419 ui__error("Selected -b but no branch data. "
420 "Did you call perf record without -b?\n");
421 return -1;
422 }
423 }
424
425 if (sort__mode == SORT_MODE__MEMORY) {
426 /*
427		 * FIXUP: prior to kernel 5.18, Arm SPE failed to set the
428		 * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward
429		 * compatibility, set the bit if it's an old perf data file.
430 */
431 evlist__for_each_entry(session->evlist, evsel) {
432 if (strstr(evsel__name(evsel), "arm_spe") &&
433 !(sample_type & PERF_SAMPLE_DATA_SRC)) {
434 evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
435 sample_type |= PERF_SAMPLE_DATA_SRC;
436 }
437 }
438
439 if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
440 ui__error("Selected --mem-mode but no mem data. "
441 "Did you call perf record without -d?\n");
442 return -1;
443 }
444 }
445
446 callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
447
448 if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
449 ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
450 "Please apply --call-graph lbr when recording.\n");
451 rep->stitch_lbr = false;
452 }
453
454 /* ??? handle more cases than just ANY? */
455 if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
456 rep->nonany_branch_mode = true;
457
458#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_LIBDW_SUPPORT)
459 if (dwarf_callchain_users) {
460 ui__warning("Please install libunwind or libdw "
461 "development packages during the perf build.\n");
462 }
463#endif
464
465 return 0;
466}
467
468static void sig_handler(int sig __maybe_unused)
469{
470 session_done = 1;
471}
472
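/*
 * Print the "# Samples: ..." header block for an event's hists: sample and
 * event counts, optional time slices, the mem mode sort order and the
 * processor socket filter, if any.
 */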
473static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
474 const char *evname, FILE *fp)
475{
476 size_t ret;
477 char unit;
478 unsigned long nr_samples = hists->stats.nr_samples;
479 u64 nr_events = hists->stats.total_period;
480 struct evsel *evsel = hists_to_evsel(hists);
481 char buf[512];
482 size_t size = sizeof(buf);
483	int socket_id = hists->socket_filter;
484
485 if (quiet)
486 return 0;
487
488 if (symbol_conf.filter_relative) {
489 nr_samples = hists->stats.nr_non_filtered_samples;
490 nr_events = hists->stats.total_non_filtered_period;
491 }
492
493 if (evsel__is_group_event(evsel)) {
494 struct evsel *pos;
495
496 evsel__group_desc(evsel, buf, size);
497 evname = buf;
498
499 for_each_group_member(pos, evsel) {
500 const struct hists *pos_hists = evsel__hists(pos);
501
502 if (symbol_conf.filter_relative) {
503 nr_samples += pos_hists->stats.nr_non_filtered_samples;
504 nr_events += pos_hists->stats.total_non_filtered_period;
505 } else {
506 nr_samples += pos_hists->stats.nr_samples;
507 nr_events += pos_hists->stats.total_period;
508 }
509 }
510 }
511
512 nr_samples = convert_unit(nr_samples, &unit);
513 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
514 if (evname != NULL) {
515 ret += fprintf(fp, " of event%s '%s'",
516 evsel->core.nr_members > 1 ? "s" : "", evname);
517 }
518
519 if (rep->time_str)
520 ret += fprintf(fp, " (time slices: %s)", rep->time_str);
521
522 if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
523 ret += fprintf(fp, ", show reference callgraph");
524 }
525
526 if (rep->mem_mode) {
527 ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
528 ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
529 } else
530 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
531
532	if (socket_id > -1)
533		ret += fprintf(fp, "\n# Processor Socket: %d", socket_id);
534
535 return ret + fprintf(fp, "\n#\n");
536}
537
538static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
539{
540 struct evsel *pos;
541 int i = 0, ret;
542
543 evlist__for_each_entry(evlist, pos) {
544 ret = report__browse_block_hists(&rep->block_reports[i++].hist,
545 rep->min_percent, pos,
546 &rep->session->header.env);
547 if (ret != 0)
548 return ret;
549 }
550
551 return 0;
552}
553
554static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
555{
556 struct evsel *pos;
557 int i = 0;
558
559 if (!quiet) {
560 fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
561 evlist->stats.total_lost_samples);
562 }
563
564 evlist__for_each_entry(evlist, pos) {
565 struct hists *hists = evsel__hists(pos);
566 const char *evname = evsel__name(pos);
567
568 i++;
569 if (symbol_conf.event_group && !evsel__is_group_leader(pos))
570 continue;
571
572 if (rep->skip_empty && !hists->stats.nr_samples)
573 continue;
574
575 hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
576
577 if (rep->total_cycles_mode) {
578 char *buf;
579
580 if (!annotation_br_cntr_abbr_list(&buf, pos, true)) {
581 fprintf(stdout, "%s", buf);
582 fprintf(stdout, "#\n");
583 free(buf);
584 }
585 report__browse_block_hists(&rep->block_reports[i - 1].hist,
586 rep->min_percent, pos, NULL);
587 continue;
588 }
589
590 hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
591 !(symbol_conf.use_callchain ||
592 symbol_conf.show_branchflag_count));
593 fprintf(stdout, "\n\n");
594 }
595
596 if (!quiet)
597 fprintf(stdout, "#\n# (%s)\n#\n", help);
598
599 if (rep->show_threads) {
600 bool style = !strcmp(rep->pretty_printing_style, "raw");
601 perf_read_values_display(stdout, &rep->show_threads_values,
602 style);
603 perf_read_values_destroy(&rep->show_threads_values);
604 }
605
606 if (sort__mode == SORT_MODE__BRANCH)
607 branch_type_stat_display(stdout, &rep->brtype_stat);
608
609 return 0;
610}
611
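/*
 * Warn when kernel samples can't be resolved, typically because kallsyms was
 * restricted by kptr_restrict when the data was recorded and no suitable
 * vmlinux is available now.
 */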
612static void report__warn_kptr_restrict(const struct report *rep)
613{
614 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
615 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
616
617 if (evlist__exclude_kernel(rep->session->evlist))
618 return;
619
620 if (kernel_map == NULL ||
621 (dso__hit(map__dso(kernel_map)) &&
622 (kernel_kmap->ref_reloc_sym == NULL ||
623 kernel_kmap->ref_reloc_sym->addr == 0))) {
624 const char *desc =
625 "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
626 "can't be resolved.";
627
628 if (kernel_map && map__has_symbols(kernel_map)) {
629 desc = "If some relocation was applied (e.g. "
630 "kexec) symbols may be misresolved.";
631 }
632
633 ui__warning(
634"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
635"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
636"Samples in kernel modules can't be resolved either.\n\n",
637 desc);
638 }
639}
640
641static int report__gtk_browse_hists(struct report *rep, const char *help)
642{
643 int (*hist_browser)(struct evlist *evlist, const char *help,
644 struct hist_browser_timer *timer, float min_pcnt);
645
646 hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");
647
648 if (hist_browser == NULL) {
649 ui__error("GTK browser not found!\n");
650 return -1;
651 }
652
653 return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
654}
655
656static int report__browse_hists(struct report *rep)
657{
658 int ret;
659 struct perf_session *session = rep->session;
660 struct evlist *evlist = session->evlist;
661 char *help = NULL, *path = NULL;
662
663 path = system_path(TIPDIR);
664 if (perf_tip(&help, path) || help == NULL) {
665 /* fallback for people who don't install perf ;-) */
666 free(path);
667 path = system_path(DOCDIR);
668 if (perf_tip(&help, path) || help == NULL)
669 help = strdup("Cannot load tips.txt file, please install perf!");
670 }
671 free(path);
672
673 switch (use_browser) {
674 case 1:
675 if (rep->total_cycles_mode) {
676 ret = evlist__tui_block_hists_browse(evlist, rep);
677 break;
678 }
679
680 ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
681 &session->header.env, true);
682 /*
683 * Usually "ret" is the last pressed key, and we only
684 * care if the key notifies us to switch data file.
685 */
686 if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
687 ret = 0;
688 break;
689 case 2:
690 ret = report__gtk_browse_hists(rep, help);
691 break;
692 default:
693 ret = evlist__tty_browse_hists(evlist, rep, help);
694 break;
695 }
696 free(help);
697 return ret;
698}
699
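/*
 * Collapse pass: merge hist entries that are equal under the sort keys for
 * each evsel, and link group member hists into their leader when event
 * grouping is enabled.
 */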
700static int report__collapse_hists(struct report *rep)
701{
702 struct perf_session *session = rep->session;
703 struct evlist *evlist = session->evlist;
704 struct ui_progress prog;
705 struct evsel *pos;
706 int ret = 0;
707
708 /*
709	 * The pipe data needs to set up hierarchy hpp formats now, because it
710 * cannot know about evsels in the data before reading the data. The
711 * normal file data saves the event (attribute) info in the header
712 * section, but pipe does not have the luxury.
713 */
714 if (perf_data__is_pipe(session->data)) {
715 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
716 ui__error("Failed to setup hierarchy output formats\n");
717 return -1;
718 }
719 }
720
721 ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
722
723 evlist__for_each_entry(rep->session->evlist, pos) {
724 struct hists *hists = evsel__hists(pos);
725
726 if (pos->core.idx == 0)
727 hists->symbol_filter_str = rep->symbol_filter_str;
728
729 hists->socket_filter = rep->socket_filter;
730
731 ret = hists__collapse_resort(hists, &prog);
732 if (ret < 0)
733 break;
734
735		/* Non-group events are considered as leaders */
736 if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
737 struct hists *leader_hists = evsel__hists(evsel__leader(pos));
738
739 hists__match(leader_hists, hists);
740 hists__link(leader_hists, hists);
741 }
742 }
743
744 ui_progress__finish();
745 return ret;
746}
747
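/*
 * Output-resort callback: when IPC output was requested, annotate the
 * entry's symbol on the fly so per-symbol cycle/IPC data is available.
 */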
748static int hists__resort_cb(struct hist_entry *he, void *arg)
749{
750 struct report *rep = arg;
751 struct symbol *sym = he->ms.sym;
752
753 if (rep->symbol_ipc && sym && !sym->annotate2) {
754 struct evsel *evsel = hists_to_evsel(he->hists);
755
756 symbol__annotate2(&he->ms, evsel, NULL);
757 }
758
759 return 0;
760}
761
762static void report__output_resort(struct report *rep)
763{
764 struct ui_progress prog;
765 struct evsel *pos;
766
767 ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
768
769 evlist__for_each_entry(rep->session->evlist, pos) {
770 evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
771 }
772
773 ui_progress__finish();
774}
775
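/* In --stats mode samples are only counted, no hist entries are created. */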
776static int count_sample_event(const struct perf_tool *tool __maybe_unused,
777 union perf_event *event __maybe_unused,
778 struct perf_sample *sample __maybe_unused,
779 struct evsel *evsel,
780 struct machine *machine __maybe_unused)
781{
782 struct hists *hists = evsel__hists(evsel);
783
784 hists__inc_nr_events(hists);
785 return 0;
786}
787
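/*
 * Account lost samples to the evsel they belong to; samples dropped by a BPF
 * filter are counted separately from other lost samples.
 */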
788static int count_lost_samples_event(const struct perf_tool *tool,
789 union perf_event *event,
790 struct perf_sample *sample,
791 struct machine *machine __maybe_unused)
792{
793 struct report *rep = container_of(tool, struct report, tool);
794 struct evsel *evsel;
795
796 evsel = evlist__id2evsel(rep->session->evlist, sample->id);
797 if (evsel) {
798 struct hists *hists = evsel__hists(evsel);
799 u32 count = event->lost_samples.lost;
800
801 if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
802 hists__inc_nr_dropped_samples(hists, count);
803 else
804 hists__inc_nr_lost_samples(hists, count);
805 }
806 return 0;
807}
808
809static int process_attr(const struct perf_tool *tool __maybe_unused,
810 union perf_event *event,
811 struct evlist **pevlist);
812
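/*
 * --stats: install a minimal set of tool callbacks that only count events;
 * the summary is printed by stats_print() afterwards.
 */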
813static void stats_setup(struct report *rep)
814{
815 perf_tool__init(&rep->tool, /*ordered_events=*/false);
816 rep->tool.attr = process_attr;
817 rep->tool.sample = count_sample_event;
818 rep->tool.lost_samples = count_lost_samples_event;
819 rep->tool.event_update = perf_event__process_event_update;
820 rep->tool.no_warn = true;
821}
822
823static int stats_print(struct report *rep)
824{
825 struct perf_session *session = rep->session;
826
827 perf_session__fprintf_nr_events(session, stdout);
828 evlist__fprintf_nr_events(session->evlist, stdout);
829 return 0;
830}
831
832static void tasks_setup(struct report *rep)
833{
834 perf_tool__init(&rep->tool, /*ordered_events=*/true);
835 if (rep->mmaps_mode) {
836 rep->tool.mmap = perf_event__process_mmap;
837 rep->tool.mmap2 = perf_event__process_mmap2;
838 }
839 rep->tool.attr = process_attr;
840 rep->tool.comm = perf_event__process_comm;
841 rep->tool.exit = perf_event__process_exit;
842 rep->tool.fork = perf_event__process_fork;
843 rep->tool.no_warn = true;
844}
845
846struct maps__fprintf_task_args {
847 int indent;
848 FILE *fp;
849 size_t printed;
850};
851
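/* Print a single map in a /proc/<pid>/maps like format for --tasks --mmaps. */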
852static int maps__fprintf_task_cb(struct map *map, void *data)
853{
854 struct maps__fprintf_task_args *args = data;
855 const struct dso *dso = map__dso(map);
856 u32 prot = map__prot(map);
857 int ret;
858
859 ret = fprintf(args->fp,
860 "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
861 args->indent, "", map__start(map), map__end(map),
862 prot & PROT_READ ? 'r' : '-',
863 prot & PROT_WRITE ? 'w' : '-',
864 prot & PROT_EXEC ? 'x' : '-',
865 map__flags(map) ? 's' : 'p',
866 map__pgoff(map),
867 dso__id_const(dso)->ino, dso__name(dso));
868
869 if (ret < 0)
870 return ret;
871
872 args->printed += ret;
873 return 0;
874}
875
876static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
877{
878 struct maps__fprintf_task_args args = {
879 .indent = indent,
880 .fp = fp,
881 .printed = 0,
882 };
883
884 maps__for_each_map(maps, maps__fprintf_task_cb, &args);
885
886 return args.printed;
887}
888
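/*
 * Depth of a thread in the process tree: 0 when tid <= 0 (e.g. the idle
 * task), 1 when ppid <= 0, otherwise one more than the parent's level.
 */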
889static int thread_level(struct machine *machine, const struct thread *thread)
890{
891 struct thread *parent_thread;
892 int res;
893
894 if (thread__tid(thread) <= 0)
895 return 0;
896
897 if (thread__ppid(thread) <= 0)
898 return 1;
899
900 parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
901 if (!parent_thread) {
902 pr_err("Missing parent thread of %d\n", thread__tid(thread));
903 return 0;
904 }
905 res = 1 + thread_level(machine, parent_thread);
906 thread__put(parent_thread);
907 return res;
908}
909
910static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
911{
912 int level = thread_level(machine, thread);
913 int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
914 thread__pid(thread), thread__tid(thread),
915 thread__ppid(thread), level, "");
916
917 fprintf(fp, "%s\n", thread__comm_str(thread));
918
919 maps__fprintf_task(thread__maps(thread), comm_indent, fp);
920}
921
922/*
923 * Sort two thread list nodes such that they form a tree. The first node is the
924 * root of the tree, its children are ordered numerically after it. If a child
925 * has children itself then they appear immediately after their parent. For
926 * example, the 4 threads in the order they'd appear in the list:
927 * - init with TID 1 and parent 0
928 * - systemd with TID 3000 and parent init/1
929 * - a systemd child thread with TID 4000 and parent 3000
930 * - NetworkManager with TID 3500 and parent init/1
931 */
932static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
933{
934 struct machine *machine = priv;
935 struct thread_list *task_a = list_entry(la, struct thread_list, list);
936 struct thread_list *task_b = list_entry(lb, struct thread_list, list);
937 struct thread *a = task_a->thread;
938 struct thread *b = task_b->thread;
939 int level_a, level_b, res;
940
941 /* Same thread? */
942 if (thread__tid(a) == thread__tid(b))
943 return 0;
944
945 /* Compare a and b to root. */
946 if (thread__tid(a) == 0)
947 return -1;
948
949 if (thread__tid(b) == 0)
950 return 1;
951
952 /* If parents match sort by tid. */
953 if (thread__ppid(a) == thread__ppid(b))
954 return thread__tid(a) < thread__tid(b) ? -1 : 1;
955
956 /*
957	 * Walk a and b up the tree until either one is an ancestor of the other
958	 * (their tids then match) or they share a common parent and have
959	 * distinct tids to sort by. First make the depths of the threads match.
960 */
961 level_a = thread_level(machine, a);
962 level_b = thread_level(machine, b);
963 a = thread__get(a);
964 b = thread__get(b);
965 for (int i = level_a; i > level_b; i--) {
966 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));
967
968 thread__put(a);
969 if (!parent) {
970 pr_err("Missing parent thread of %d\n", thread__tid(a));
971 thread__put(b);
972 return -1;
973 }
974 a = parent;
975 }
976 for (int i = level_b; i > level_a; i--) {
977 struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));
978
979 thread__put(b);
980 if (!parent) {
981 pr_err("Missing parent thread of %d\n", thread__tid(b));
982 thread__put(a);
983 return 1;
984 }
985 b = parent;
986 }
987 /* Search up to a common parent. */
988 while (thread__ppid(a) != thread__ppid(b)) {
989 struct thread *parent;
990
991 parent = machine__find_thread(machine, -1, thread__ppid(a));
992 thread__put(a);
993 if (!parent)
994 pr_err("Missing parent thread of %d\n", thread__tid(a));
995 a = parent;
996 parent = machine__find_thread(machine, -1, thread__ppid(b));
997 thread__put(b);
998 if (!parent)
999 pr_err("Missing parent thread of %d\n", thread__tid(b));
1000 b = parent;
1001 if (!a || !b) {
1002 /* Handle missing parent (unexpected) with some sanity. */
1003 thread__put(a);
1004 thread__put(b);
1005 return !a && !b ? 0 : (!a ? -1 : 1);
1006 }
1007 }
1008 if (thread__tid(a) == thread__tid(b)) {
1009 /* a is a child of b or vice-versa, deeper levels appear later. */
1010 res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
1011 } else {
1012 /* Sort by tid now the parent is the same. */
1013 res = thread__tid(a) < thread__tid(b) ? -1 : 1;
1014 }
1015 thread__put(a);
1016 thread__put(b);
1017 return res;
1018}
1019
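/*
 * --tasks: list every thread found in the data file, sorted so that children
 * follow their parents, one pid/tid/ppid/comm line (plus memory maps) each.
 */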
1020static int tasks_print(struct report *rep, FILE *fp)
1021{
1022 struct machine *machine = &rep->session->machines.host;
1023 LIST_HEAD(tasks);
1024 int ret;
1025
1026 ret = machine__thread_list(machine, &tasks);
1027 if (!ret) {
1028 struct thread_list *task;
1029
1030 list_sort(machine, &tasks, task_list_cmp);
1031
1032 fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
1033
1034 list_for_each_entry(task, &tasks, list)
1035 task__print_level(machine, task->thread, fp);
1036 }
1037 thread_list__delete(&tasks);
1038 return ret;
1039}
1040
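/*
 * Main report pass: process all events in the data file, then collapse and
 * resort the histograms before handing them to the selected browser.
 */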
1041static int __cmd_report(struct report *rep)
1042{
1043 int ret;
1044 struct perf_session *session = rep->session;
1045 struct evsel *pos;
1046 struct perf_data *data = session->data;
1047
1048 signal(SIGINT, sig_handler);
1049
1050 if (rep->cpu_list) {
1051 ret = perf_session__cpu_bitmap(session, rep->cpu_list,
1052 rep->cpu_bitmap);
1053 if (ret) {
1054 ui__error("failed to set cpu bitmap\n");
1055 return ret;
1056 }
1057 session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
1058 }
1059
1060 if (rep->show_threads) {
1061 ret = perf_read_values_init(&rep->show_threads_values);
1062 if (ret)
1063 return ret;
1064 }
1065
1066 ret = report__setup_sample_type(rep);
1067 if (ret) {
1068 /* report__setup_sample_type() already showed error message */
1069 return ret;
1070 }
1071
1072 if (rep->stats_mode)
1073 stats_setup(rep);
1074
1075 if (rep->tasks_mode)
1076 tasks_setup(rep);
1077
1078 ret = perf_session__process_events(session);
1079 if (ret) {
1080 ui__error("failed to process sample\n");
1081 return ret;
1082 }
1083
1084 evlist__check_mem_load_aux(session->evlist);
1085
1086 if (rep->stats_mode)
1087 return stats_print(rep);
1088
1089 if (rep->tasks_mode)
1090 return tasks_print(rep, stdout);
1091
1092 report__warn_kptr_restrict(rep);
1093
1094 evlist__for_each_entry(session->evlist, pos)
1095 rep->nr_entries += evsel__hists(pos)->nr_entries;
1096
1097 if (use_browser == 0) {
1098 if (verbose > 3)
1099 perf_session__fprintf(session, stdout);
1100
1101 if (verbose > 2)
1102 perf_session__fprintf_dsos(session, stdout);
1103
1104 if (dump_trace) {
1105 stats_print(rep);
1106 return 0;
1107 }
1108 }
1109
1110 ret = report__collapse_hists(rep);
1111 if (ret) {
1112 ui__error("failed to process hist entry\n");
1113 return ret;
1114 }
1115
1116 if (session_done())
1117 return 0;
1118
1119 /*
1120	 * Recalculate the number of entries after collapsing since it
1121	 * might have changed during the collapse phase.
1122 */
1123 rep->nr_entries = 0;
1124 evlist__for_each_entry(session->evlist, pos)
1125 rep->nr_entries += evsel__hists(pos)->nr_entries;
1126
1127 if (rep->nr_entries == 0) {
1128 ui__error("The %s data has no samples!\n", data->path);
1129 return 0;
1130 }
1131
1132 report__output_resort(rep);
1133
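	/*
	 * --total-cycles: build per basic block reports, adding the branch
	 * counter column only when the file contains branch counters.
	 */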
1134 if (rep->total_cycles_mode) {
1135 int nr_hpps = 4;
1136 int block_hpps[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
1137 PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
1138 PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
1139 PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
1140 PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
1141 };
1142
1143 if (session->evlist->nr_br_cntr > 0)
1144 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER;
1145
1146 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_RANGE;
1147 block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_DSO;
1148
1149 rep->block_reports = block_info__create_report(session->evlist,
1150 rep->total_cycles,
1151 block_hpps, nr_hpps,
1152 &rep->nr_block_reports);
1153 if (!rep->block_reports)
1154 return -1;
1155 }
1156
1157 return report__browse_hists(rep);
1158}
1159
1160static int
1161report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1162{
1163 struct callchain_param *callchain = opt->value;
1164
1165 callchain->enabled = !unset;
1166 /*
1167 * --no-call-graph
1168 */
1169 if (unset) {
1170 symbol_conf.use_callchain = false;
1171 callchain->mode = CHAIN_NONE;
1172 return 0;
1173 }
1174
1175 return parse_callchain_report_opt(arg);
1176}
1177
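/*
 * Parse the --time-quantum argument: a number with an optional s/ms/us/ns
 * suffix, stored internally in nanoseconds.
 */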
1178static int
1179parse_time_quantum(const struct option *opt, const char *arg,
1180 int unset __maybe_unused)
1181{
1182 unsigned long *time_q = opt->value;
1183 char *end;
1184
1185 *time_q = strtoul(arg, &end, 0);
1186 if (end == arg)
1187 goto parse_err;
1188 if (*time_q == 0) {
1189		pr_err("time quantum cannot be 0\n");
1190 return -1;
1191 }
1192 end = skip_spaces(end);
1193 if (*end == 0)
1194 return 0;
1195 if (!strcmp(end, "s")) {
1196 *time_q *= NSEC_PER_SEC;
1197 return 0;
1198 }
1199 if (!strcmp(end, "ms")) {
1200 *time_q *= NSEC_PER_MSEC;
1201 return 0;
1202 }
1203 if (!strcmp(end, "us")) {
1204 *time_q *= NSEC_PER_USEC;
1205 return 0;
1206 }
1207 if (!strcmp(end, "ns"))
1208 return 0;
1209parse_err:
1210 pr_err("Cannot parse time quantum `%s'\n", arg);
1211 return -1;
1212}
1213
1214int
1215report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
1216 const char *arg, int unset __maybe_unused)
1217{
1218 if (arg) {
1219 int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
1220 if (err) {
1221 char buf[BUFSIZ];
1222 regerror(err, &ignore_callees_regex, buf, sizeof(buf));
1223 pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
1224 return -1;
1225 }
1226 have_ignore_callees = 1;
1227 }
1228
1229 return 0;
1230}
1231
1232static int
1233parse_branch_mode(const struct option *opt,
1234 const char *str __maybe_unused, int unset)
1235{
1236 int *branch_mode = opt->value;
1237
1238 *branch_mode = !unset;
1239 return 0;
1240}
1241
1242static int
1243parse_percent_limit(const struct option *opt, const char *str,
1244 int unset __maybe_unused)
1245{
1246 struct report *rep = opt->value;
1247 double pcnt = strtof(str, NULL);
1248
1249 rep->min_percent = pcnt;
1250 callchain_param.min_percent = pcnt;
1251 return 0;
1252}
1253
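/*
 * Attribute events may arrive in the stream (e.g. in pipe mode), so
 * re-evaluate the callchain setup whenever the combined sample_type changes.
 */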
1254static int process_attr(const struct perf_tool *tool __maybe_unused,
1255 union perf_event *event,
1256 struct evlist **pevlist)
1257{
1258 u64 sample_type;
1259 int err;
1260
1261 err = perf_event__process_attr(tool, event, pevlist);
1262 if (err)
1263 return err;
1264
1265 /*
1266 * Check if we need to enable callchains based
1267	 * on the events' sample_type.
1268 */
1269 sample_type = evlist__combined_sample_type(*pevlist);
1270 callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
1271 return 0;
1272}
1273
1274#define CALLCHAIN_BRANCH_SORT_ORDER \
1275 "srcline,symbol,dso,callchain_branch_predicted," \
1276 "callchain_branch_abort,callchain_branch_cycles"
1277
1278int cmd_report(int argc, const char **argv)
1279{
1280 struct perf_session *session;
1281 struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
1282 struct stat st;
1283 bool has_br_stack = false;
1284 int branch_mode = -1;
1285 int last_key = 0;
1286 bool branch_call_mode = false;
1287#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
1288 static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
1289 CALLCHAIN_REPORT_HELP
1290 "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
1291 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
1292 const char * const report_usage[] = {
1293 "perf report [<options>]",
1294 NULL
1295 };
1296 struct report report = {
1297 .max_stack = PERF_MAX_STACK_DEPTH,
1298 .pretty_printing_style = "normal",
1299 .socket_filter = -1,
1300 .skip_empty = true,
1301 };
1302 char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__NORMAL);
1303 char *field_order_help = sort_help("output field(s):", SORT_MODE__NORMAL);
1304 const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
1305 const struct option options[] = {
1306 OPT_STRING('i', "input", &input_name, "file",
1307 "input file name"),
1308 OPT_INCR('v', "verbose", &verbose,
1309 "be more verbose (show symbol address, etc)"),
1310 OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
1311 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1312 "dump raw trace in ASCII"),
1313 OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
1314 OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
1315 OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
1316 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1317 "file", "vmlinux pathname"),
1318 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1319 "don't load vmlinux even if found"),
1320 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1321 "file", "kallsyms pathname"),
1322 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
1323 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
1324 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1325 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
1326 "Show a column with the number of samples"),
1327 OPT_BOOLEAN('T', "threads", &report.show_threads,
1328 "Show per-thread event counters"),
1329 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
1330 "pretty printing style key: normal raw"),
1331#ifdef HAVE_SLANG_SUPPORT
1332 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
1333#endif
1334#ifdef HAVE_GTK2_SUPPORT
1335 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
1336#endif
1337 OPT_BOOLEAN(0, "stdio", &report.use_stdio,
1338 "Use the stdio interface"),
1339 OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
1340 OPT_BOOLEAN(0, "header-only", &report.header_only,
1341 "Show only data header."),
1342 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1343 sort_order_help),
1344 OPT_STRING('F', "fields", &field_order, "key[,keys...]",
1345 field_order_help),
1346 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
1347 "Show sample percentage for different cpu modes"),
1348 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
1349 "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
1350 OPT_STRING('p', "parent", &parent_pattern, "regex",
1351 "regex filter to identify parent, see: '--sort parent'"),
1352 OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
1353 "Only display entries with parent-match"),
1354 OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
1355 "print_type,threshold[,print_limit],order,sort_key[,branch],value",
1356 report_callchain_help, &report_parse_callchain_opt,
1357 callchain_default_opt),
1358 OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
1359 "Accumulate callchains of children and show total overhead as well. "
1360 "Enabled by default, use --no-children to disable."),
1361 OPT_INTEGER(0, "max-stack", &report.max_stack,
1362 "Set the maximum stack depth when parsing the callchain, "
1363 "anything beyond the specified depth will be ignored. "
1364 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1365 OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
1366 "alias for inverted call graph"),
1367 OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
1368 "ignore callees of these functions in call graphs",
1369 report_parse_ignore_callees_opt),
1370 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
1371 "only consider symbols in these dsos"),
1372 OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
1373 "only consider symbols in these comms"),
1374 OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
1375 "only consider symbols in these pids"),
1376 OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
1377 "only consider symbols in these tids"),
1378 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
1379 "only consider these symbols"),
1380 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
1381 "only show symbols that (partially) match with this filter"),
1382 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
1383 "width[,width...]",
1384 "don't try to adjust column width, use these fixed values"),
1385 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
1386 "separator for columns, no spaces will be added between "
1387 "columns '.' is reserved."),
1388 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
1389 "Only display entries resolved to a symbol"),
1390 OPT_CALLBACK(0, "symfs", NULL, "directory",
1391 "Look for files with symbols relative to this directory",
1392 symbol__config_symfs),
1393 OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
1394 "list of cpus to profile"),
1395 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
1396 "Display extended information about perf.data file"),
1397 OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
1398 "Interleave source code with assembly code (default)"),
1399 OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
1400 "Display raw encoding of assembly instructions (default)"),
1401 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
1402 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1403 OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
1404 "Add prefix to source file path names in programs (with --prefix-strip)"),
1405 OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
1406 "Strip first N entries of source file path name in programs (with --prefix)"),
1407 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
1408 "Show a column with the sum of periods"),
1409 OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
1410 "Show event group information together"),
1411 OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
1412 "Sort the output by the event at the index n in group. "
1413 "If n is invalid, sort by the first event. "
1414 "WARNING: should be used on grouped events."),
1415 OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
1416 "use branch records for per branch histogram filling",
1417 parse_branch_mode),
1418 OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
1419 "add last branch records to call history"),
1420 OPT_STRING(0, "objdump", &objdump_path, "path",
1421 "objdump binary to use for disassembly and annotations"),
1422 OPT_STRING(0, "addr2line", &addr2line_path, "path",
1423 "addr2line binary to use for line numbers"),
1424 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
1425 "Symbol demangling. Enabled by default, use --no-demangle to disable."),
1426 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1427 "Enable kernel symbol demangling"),
1428 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
1429 OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
1430 "Number of samples to save per histogram entry for individual browsing"),
1431 OPT_CALLBACK(0, "percent-limit", &report, "percent",
1432 "Don't show entries under that percent", parse_percent_limit),
1433 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
1434 "how to display percentage of filtered entries", parse_filter_percentage),
1435 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
1436 "Instruction Tracing options\n" ITRACE_HELP,
1437 itrace_parse_synth_opts),
1438 OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
1439 "Show full source file name path for source lines"),
1440 OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
1441 "Show callgraph from reference event"),
1442 OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
1443 "Enable LBR callgraph stitching approach"),
1444 OPT_INTEGER(0, "socket-filter", &report.socket_filter,
1445 "only show processor socket that match with this filter"),
1446 OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
1447 "Show raw trace event output (do not use print fmt or plugins)"),
1448 OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
1449 "Show entries in a hierarchy"),
1450 OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
1451 "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
1452 stdio__config_color, "always"),
1453 OPT_STRING(0, "time", &report.time_str, "str",
1454 "Time span of interest (start,stop)"),
1455 OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
1456 "Show inline function"),
1457 OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
1458 "Set percent type local/global-period/hits",
1459 annotate_parse_percent_type),
1460 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
1461 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
1462 "Set time quantum for time sort key (default 100ms)",
1463 parse_time_quantum),
1464 OPTS_EVSWITCH(&report.evswitch),
1465 OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
1466 "Sort all blocks by 'Sampled Cycles%'"),
1467 OPT_BOOLEAN(0, "disable-order", &report.disable_order,
1468 "Disable raw trace ordering"),
1469 OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
1470 "Do not display empty (or dummy) events in the output"),
1471 OPT_END()
1472 };
1473 struct perf_data data = {
1474 .mode = PERF_DATA_MODE_READ,
1475 };
1476 int ret = hists__init();
1477 char sort_tmp[128];
1478 bool ordered_events = true;
1479
1480 if (ret < 0)
1481 goto exit;
1482
1483 /*
1484	 * tasks_mode requires access to exited threads to list those that are in
1485 * the data file. Off-cpu events are synthesized after other events and
1486 * reference exited threads.
1487 */
1488 symbol_conf.keep_exited_threads = true;
1489
1490 annotation_options__init();
1491
1492 ret = perf_config(report__config, &report);
1493 if (ret)
1494 goto exit;
1495
1496 argc = parse_options(argc, argv, options, report_usage, 0);
1497 if (argc) {
1498 /*
1499 * Special case: if there's an argument left then assume that
1500 * it's a symbol filter:
1501 */
1502 if (argc > 1)
1503 usage_with_options(report_usage, options);
1504
1505 report.symbol_filter_str = argv[0];
1506 }
1507
1508 if (disassembler_style) {
1509 annotate_opts.disassembler_style = strdup(disassembler_style);
1510 if (!annotate_opts.disassembler_style)
1511 return -ENOMEM;
1512 }
1513 if (objdump_path) {
1514 annotate_opts.objdump_path = strdup(objdump_path);
1515 if (!annotate_opts.objdump_path)
1516 return -ENOMEM;
1517 }
1518 if (addr2line_path) {
1519 symbol_conf.addr2line_path = strdup(addr2line_path);
1520 if (!symbol_conf.addr2line_path)
1521 return -ENOMEM;
1522 }
1523
1524 if (annotate_check_args() < 0) {
1525 ret = -EINVAL;
1526 goto exit;
1527 }
1528
1529 if (report.mmaps_mode)
1530 report.tasks_mode = true;
1531
1532 if (dump_trace && report.disable_order)
1533 ordered_events = false;
1534
1535 if (quiet)
1536 perf_quiet_option();
1537
1538 ret = symbol__validate_sym_arguments();
1539 if (ret)
1540 goto exit;
1541
1542 if (report.inverted_callchain)
1543 callchain_param.order = ORDER_CALLER;
1544 if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
1545 callchain_param.order = ORDER_CALLER;
1546
1547 if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
1548 (int)itrace_synth_opts.callchain_sz > report.max_stack)
1549 report.max_stack = itrace_synth_opts.callchain_sz;
1550
1551 if (!input_name || !strlen(input_name)) {
1552 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
1553 input_name = "-";
1554 else
1555 input_name = "perf.data";
1556 }
1557
1558 data.path = input_name;
1559 data.force = symbol_conf.force;
1560
1561 symbol_conf.skip_empty = report.skip_empty;
1562
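/*
 * Everything from here on is redone when the TUI asks to switch or reload
 * the input data (K_SWITCH_INPUT_DATA/K_RELOAD): the tool callbacks and the
 * session are torn down and set up again for the new pass.
 */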
1563repeat:
1564 perf_tool__init(&report.tool, ordered_events);
1565 report.tool.sample = process_sample_event;
1566 report.tool.mmap = perf_event__process_mmap;
1567 report.tool.mmap2 = perf_event__process_mmap2;
1568 report.tool.comm = perf_event__process_comm;
1569 report.tool.namespaces = perf_event__process_namespaces;
1570 report.tool.cgroup = perf_event__process_cgroup;
1571 report.tool.exit = perf_event__process_exit;
1572 report.tool.fork = perf_event__process_fork;
1573 report.tool.lost = perf_event__process_lost;
1574 report.tool.read = process_read_event;
1575 report.tool.attr = process_attr;
1576#ifdef HAVE_LIBTRACEEVENT
1577 report.tool.tracing_data = perf_event__process_tracing_data;
1578#endif
1579 report.tool.build_id = perf_event__process_build_id;
1580 report.tool.id_index = perf_event__process_id_index;
1581 report.tool.auxtrace_info = perf_event__process_auxtrace_info;
1582 report.tool.auxtrace = perf_event__process_auxtrace;
1583 report.tool.event_update = perf_event__process_event_update;
1584 report.tool.feature = process_feature_event;
1585 report.tool.ordering_requires_timestamps = true;
1586
1587 session = perf_session__new(&data, &report.tool);
1588 if (IS_ERR(session)) {
1589 ret = PTR_ERR(session);
1590 goto exit;
1591 }
1592
1593 ret = evswitch__init(&report.evswitch, session->evlist, stderr);
1594 if (ret)
1595 goto exit;
1596
1597 if (zstd_init(&(session->zstd_data), 0) < 0)
1598 pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
1599
1600 if (report.queue_size) {
1601 ordered_events__set_alloc_size(&session->ordered_events,
1602 report.queue_size);
1603 }
1604
1605 session->itrace_synth_opts = &itrace_synth_opts;
1606
1607 report.session = session;
1608
1609 has_br_stack = perf_header__has_feat(&session->header,
1610 HEADER_BRANCH_STACK);
1611 if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
1612 has_br_stack = false;
1613
1614 setup_forced_leader(&report, session->evlist);
1615
1616 if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
1617 parse_options_usage(NULL, options, "group-sort-idx", 0);
1618 ret = -EINVAL;
1619 goto error;
1620 }
1621
1622 if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
1623 has_br_stack = true;
1624
1625 if (has_br_stack && branch_call_mode)
1626 symbol_conf.show_branchflag_count = true;
1627
1628 memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));
1629
1630 /*
1631 * Branch mode is a tristate:
1632 * -1 means default, so decide based on the file having branch data.
1633 * 0/1 means the user chose a mode.
1634 */
1635 if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
1636 !branch_call_mode) {
1637 sort__mode = SORT_MODE__BRANCH;
1638 symbol_conf.cumulate_callchain = false;
1639 }
1640 if (branch_call_mode) {
1641 callchain_param.key = CCKEY_ADDRESS;
1642 callchain_param.branch_callstack = true;
1643 symbol_conf.use_callchain = true;
1644 callchain_register_param(&callchain_param);
1645 if (sort_order == NULL)
1646 sort_order = CALLCHAIN_BRANCH_SORT_ORDER;
1647 }
1648
1649 if (report.mem_mode) {
1650 if (sort__mode == SORT_MODE__BRANCH) {
1651 pr_err("branch and mem mode incompatible\n");
1652 goto error;
1653 }
1654 sort__mode = SORT_MODE__MEMORY;
1655 symbol_conf.cumulate_callchain = false;
1656 }
1657
1658 if (symbol_conf.report_hierarchy) {
1659 /* disable incompatible options */
1660 symbol_conf.cumulate_callchain = false;
1661
1662 if (field_order) {
1663 pr_err("Error: --hierarchy and --fields options cannot be used together\n");
1664 parse_options_usage(report_usage, options, "F", 1);
1665 parse_options_usage(NULL, options, "hierarchy", 0);
1666 goto error;
1667 }
1668
1669 perf_hpp_list.need_collapse = true;
1670 }
1671
1672 if (report.use_stdio)
1673 use_browser = 0;
1674#ifdef HAVE_SLANG_SUPPORT
1675 else if (report.use_tui)
1676 use_browser = 1;
1677#endif
1678#ifdef HAVE_GTK2_SUPPORT
1679 else if (report.use_gtk)
1680 use_browser = 2;
1681#endif
1682
1683 /* Force tty output for header output and per-thread stat. */
1684 if (report.header || report.header_only || report.show_threads)
1685 use_browser = 0;
1686 if (report.header || report.header_only)
1687 report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
1688 if (report.show_full_info)
1689 report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
1690 if (report.stats_mode || report.tasks_mode)
1691 use_browser = 0;
1692 if (report.stats_mode && report.tasks_mode) {
1693 pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
1694 goto error;
1695 }
1696
1697 if (report.total_cycles_mode) {
1698 if (sort__mode != SORT_MODE__BRANCH)
1699 report.total_cycles_mode = false;
1700 else
1701 sort_order = NULL;
1702 }
1703
1704 if (sort_order && strstr(sort_order, "type")) {
1705 report.data_type = true;
1706 annotate_opts.annotate_src = false;
1707
1708#ifndef HAVE_LIBDW_SUPPORT
1709 pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
1710 goto error;
1711#endif
1712 }
1713
1714 if (strcmp(input_name, "-") != 0)
1715 setup_browser(true);
1716 else
1717 use_browser = 0;
1718
1719 if (report.data_type && use_browser == 1) {
1720 symbol_conf.annotate_data_member = true;
1721 symbol_conf.annotate_data_sample = true;
1722 }
1723
1724 if (sort_order && strstr(sort_order, "ipc")) {
1725 parse_options_usage(report_usage, options, "s", 1);
1726 goto error;
1727 }
1728
1729 if (sort_order && strstr(sort_order, "symbol")) {
1730 if (sort__mode == SORT_MODE__BRANCH) {
1731 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1732 sort_order, "ipc_lbr");
1733 report.symbol_ipc = true;
1734 } else {
1735 snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
1736 sort_order, "ipc_null");
1737 }
1738
1739 sort_order = sort_tmp;
1740 }
1741
1742 if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
1743 (setup_sorting(session->evlist) < 0)) {
1744 if (sort_order)
1745 parse_options_usage(report_usage, options, "s", 1);
1746 if (field_order)
1747 parse_options_usage(sort_order ? NULL : report_usage,
1748 options, "F", 1);
1749 goto error;
1750 }
1751
1752 if ((report.header || report.header_only) && !quiet) {
1753 perf_session__fprintf_info(session, stdout,
1754 report.show_full_info);
1755 if (report.header_only) {
1756 if (data.is_pipe) {
1757 /*
1758				 * We need to process the first few records,
1759				 * which contain PERF_RECORD_HEADER_FEATURE.
1760 */
1761 perf_session__process_events(session);
1762 }
1763 ret = 0;
1764 goto error;
1765 }
1766 } else if (use_browser == 0 && !quiet &&
1767 !report.stats_mode && !report.tasks_mode) {
1768 fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
1769 stdout);
1770 }
1771
1772 /*
1773	 * Only the TUI browser does integrated annotation, so don't
1774	 * allocate extra space that won't be used in the stdio
1775	 * implementation.
1776 */
1777 if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
1778 report.total_cycles_mode) {
1779 ret = symbol__annotation_init();
1780 if (ret < 0)
1781 goto error;
1782 /*
1783		 * For searching by name in "Browse map details",
1784		 * provide it only in verbose mode so as not to
1785		 * bloat struct symbol too much.
1786 */
1787 if (verbose > 0) {
1788 /*
1789 * XXX: Need to provide a less kludgy way to ask for
1790 * more space per symbol, the u32 is for the index on
1791 * the ui browser.
1792 * See symbol__browser_index.
1793 */
1794 symbol_conf.priv_size += sizeof(u32);
1795 }
1796 annotation_config__init();
1797 }
1798
1799 if (symbol__init(&session->header.env) < 0)
1800 goto error;
1801
1802 if (report.time_str) {
1803 ret = perf_time__parse_for_ranges(report.time_str, session,
1804 &report.ptime_range,
1805 &report.range_size,
1806 &report.range_num);
1807 if (ret < 0)
1808 goto error;
1809
1810 itrace_synth_opts__set_time_range(&itrace_synth_opts,
1811 report.ptime_range,
1812 report.range_num);
1813 }
1814
1815#ifdef HAVE_LIBTRACEEVENT
1816 if (session->tevent.pevent &&
1817 tep_set_function_resolver(session->tevent.pevent,
1818 machine__resolve_kernel_addr,
1819 &session->machines.host) < 0) {
1820 pr_err("%s: failed to set libtraceevent function resolver\n",
1821 __func__);
1822 return -1;
1823 }
1824#endif
1825 sort__setup_elide(stdout);
1826
1827 ret = __cmd_report(&report);
1828 if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
1829 perf_session__delete(session);
1830 last_key = K_SWITCH_INPUT_DATA;
1831 goto repeat;
1832 } else
1833 ret = 0;
1834
1835 if (!use_browser && (verbose > 2 || debug_kmaps))
1836 perf_session__dump_kmaps(session);
1837error:
1838 if (report.ptime_range) {
1839 itrace_synth_opts__clear_time_range(&itrace_synth_opts);
1840 zfree(&report.ptime_range);
1841 }
1842
1843 if (report.block_reports) {
1844 block_info__free_report(report.block_reports,
1845 report.nr_block_reports);
1846 report.block_reports = NULL;
1847 }
1848
1849 zstd_fini(&(session->zstd_data));
1850 perf_session__delete(session);
1851exit:
1852 annotation_options__exit();
1853 free(sort_order_help);
1854 free(field_order_help);
1855 return ret;
1856}