// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <linux/string.h>

#include "../../util/callchain.h"
#include "../../util/debug.h"
#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/map_groups.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

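/*
 * Print the single leading space plus 'left_margin' spaces of indentation
 * that every callchain line starts with; returns the number of characters
 * written.
 */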
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

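/*
 * Print one separator line of the call graph: a '|' for every level that is
 * still open according to 'depth_mask', blanks for levels that have ended.
 */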
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

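/*
 * Print one callchain_list entry in graph mode: the '|' columns for the open
 * levels, a "--<value>--" marker on the first entry of a node, the symbol
 * name and, if requested, the branch flag counts.
 */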
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

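/*
 * Allocate the fake "[...]" symbol used by rem_hits to represent callchain
 * hits that were filtered out by the percent limit.
 */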
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

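/*
 * Recursively print a callchain subtree in graph mode: walk the rbtree of
 * children, print each node's entries, recurse into its children and, in
 * CHAIN_GRAPH_REL mode, append a "[...]" entry for the remaining (filtered)
 * hits.
 */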
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except if we have
		 * remaining filtered hits: they will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If we have a single callchain root, don't bother printing
 * its percentage (100 % in fractal mode, and the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

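/*
 * Top level graph printer: if there is a single root whose hits match the
 * parent's samples, print its chain inline behind a "---" prefix without a
 * percentage, then hand the remaining children to __callchain__fprintf_graph().
 */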
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol. No need to print it, otherwise it
			 * would appear twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

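/*
 * Flat mode: print the ancestors first (by recursing to the parent), then
 * this node's own entries, one symbol per line, skipping PERF_CONTEXT_*
 * markers.
 */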
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

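/*
 * Folded mode: like flat mode, but all symbols of one chain are joined on a
 * single line, separated by symbol_conf.field_sep (';' by default).
 */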
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

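/*
 * Print the callchain of one hist entry according to the configured
 * callchain mode (graph, flat or folded).
 */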
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

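/*
 * Format one hist entry into hpp->buf using the given column format list,
 * honouring the field separator and color settings; returns the number of
 * bytes written.
 */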
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

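/*
 * Print one hist entry in --hierarchy mode: indent by the entry depth, print
 * the overhead columns, then the entry's own sort column left-aligned, and
 * finally its callchain if the entry is a leaf.
 */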
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
	}
	printed += putc('\n', fp);

	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

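/*
 * Block report mode: print one formatted line for each entry of the embedded
 * block_hists, skipping lines the format marked as skipped.
 */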
static int hist_entry__block_fprintf(struct hist_entry *he,
				     char *bf, size_t size,
				     FILE *fp)
{
	struct block_hist *bh = container_of(he, struct block_hist, he);
	int ret = 0;

	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
		struct perf_hpp hpp = {
			.buf		= bf,
			.size		= size,
			.skip		= false,
		};

		bh->block_idx = i;
		hist_entry__snprintf(he, &hpp);

		if (!hpp.skip)
			ret += fprintf(fp, "%s\n", bf);
	}

	return ret;
}

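/*
 * Format and print a single hist entry, delegating to the hierarchy or block
 * printers when those report modes are enabled, and appending the callchain
 * unless callchains are ignored.
 */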
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool ignore_callchains)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	if (symbol_conf.report_block)
		return hist_entry__block_fprintf(he, bf, size, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (hist_entry__has_callchains(he) && !ignore_callchains)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

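/*
 * Print '(indent - 2) * HIERARCHY_INDENT' characters taken from 'line' as
 * hierarchy padding; does nothing when a field separator is in use or the
 * indent is too small.
 */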
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	int width;

	if (sep != NULL || indent < 2)
		return 0;

	width = (indent - 2) * HIERARCHY_INDENT;

	return fprintf(fp, "%-*.*s", width, width, line);
}

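/*
 * Print the --hierarchy header block: the overhead column headers followed
 * by the sort keys joined with " / ", then a line of dots underneath.
 * Returns the number of header lines printed (2).
 */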
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, " ", fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", strim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

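/*
 * Print one standard header line, emitting every active column header and
 * honouring headers that span multiple columns.
 */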
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

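/*
 * Print the standard (non-hierarchy) header lines and, when no field
 * separator is used, the line of dots and the trailing "#" line underneath.
 * Returns the number of lines printed.
 */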
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

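/*
 * Print the column headers, dispatching to the hierarchy or standard header
 * printer; returns the number of header rows written.
 */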
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

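/*
 * Main stdio entry point: print the headers, then every unfiltered hist
 * entry whose percentage is at least min_pcnt, stopping after max_rows rows.
 * Map groups are dumped for entries without a resolved map when running in
 * verbose mode.
 */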
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first_cached(&hists->entries); nd;
	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, " ", fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			map_groups__fprintf(h->thread->mg, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

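/*
 * Print a summary of how many events of each PERF_RECORD_* type were seen.
 */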
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
	}

	return ret;
}