Loading...
1#include <stdio.h>
2
3#include "../../util/util.h"
4#include "../../util/hist.h"
5#include "../../util/sort.h"
6#include "../../util/evsel.h"
7
8
/*
 * Emit the blank prefix that shifts callchain output to the right:
 * one space plus @left_margin further spaces.  Returns the number of
 * characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	size_t printed = fprintf(fp, " ");
	int col;

	for (col = 0; col < left_margin; col++)
		printed += fprintf(fp, " ");

	return printed;
}
19
/*
 * Print one separator row of the callchain graph: the left margin,
 * then for every level up to @depth either a pipe segment (level still
 * open in @depth_mask) or blank space, terminated by a newline.
 * Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++) {
		if (depth_mask & (1 << level))
			printed += fprintf(fp, "| ");
		else
			printed += fprintf(fp, " ");
	}

	printed += fprintf(fp, "\n");

	return printed;
}
36
/*
 * Print a single callchain entry line: the margin, one pipe/space per
 * level, the "--<value>--" knob on the branch line of a new sibling
 * group (period == 0 at the innermost level), then the symbol name.
 *
 * NOTE(review): the returned count covers only the margin, pipes and
 * value; the symbol name and trailing newline go out via fputs()/fputc()
 * and are not added to ret.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024];

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		/* draw the pipe of every level that is still open */
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			/* first entry of a child node: print its value knob */
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}
	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
	fputc('\n', fp);
	return ret;
}
63
/* Fake symbol/chain entry used to render hits filtered below the percent limit as "[...]". */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
66
67static void init_rem_hits(void)
68{
69 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
70 if (!rem_sq_bracket) {
71 fprintf(stderr, "Not enough memory to display remaining hits\n");
72 return;
73 }
74
75 strcpy(rem_sq_bracket->name, "[...]");
76 rem_hits.ms.sym = rem_sq_bracket;
77}
78
/*
 * Recursively print one level of the callchain graph (ASCII art with
 * '|' pipes).  @depth_mask carries one bit per level telling whether
 * that level's pipe is still open.  In CHAIN_GRAPH_REL mode, hits
 * filtered below the percent limit are summarized as a trailing
 * "[...]" pseudo entry.  Returns the accumulated character count (see
 * ipchain__fprintf_graph() for its accounting caveat).
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 prints the value knob, the rest plain lines */
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* relative mode re-bases percentages on this subtree */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		/* summarize the filtered-out leftovers as "[...]" */
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
167
168/*
169 * If have one single callchain root, don't bother printing
170 * its percentage (100 % in fractal mode and the same percentage
171 * than the hist in graph mode). This also avoid one level of column.
172 *
173 * However when percent-limit applied, it's possible that single callchain
174 * node have different (non-100% in fractal mode) percentage.
175 */
176static bool need_percent_display(struct rb_node *node, u64 parent_samples)
177{
178 struct callchain_node *cnode;
179
180 if (rb_next(node))
181 return true;
182
183 cnode = rb_entry(node, struct callchain_node, rb_node);
184 return callchain_cumul_hits(cnode) != parent_samples;
185}
186
/*
 * Top-level graph printer for one hist entry's sorted callchain.  When
 * a single root needs no percentage of its own (need_percent_display()),
 * its entries are printed inline after a "---" lead-in and the
 * recursion starts at that root's children.  Returns the number of
 * characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;
			if (!printed) {
				/* lead-in connecting the hist line to the chain */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
							false));

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* the single root is done: recurse from its children */
		root = &cnode->rb_root;
	}

	/* relative mode: percentages are against the parent entry */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
243
244static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
245 u64 total_samples)
246{
247 struct callchain_list *chain;
248 size_t ret = 0;
249 char bf[1024];
250
251 if (!node)
252 return 0;
253
254 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
255
256
257 list_for_each_entry(chain, &node->val, list) {
258 if (chain->ip >= PERF_CONTEXT_MAX)
259 continue;
260 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
261 bf, sizeof(bf), false));
262 }
263
264 return ret;
265}
266
267static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
268 u64 total_samples)
269{
270 size_t ret = 0;
271 u32 entries_printed = 0;
272 struct callchain_node *chain;
273 struct rb_node *rb_node = rb_first(tree);
274
275 while (rb_node) {
276 chain = rb_entry(rb_node, struct callchain_node, rb_node);
277
278 ret += fprintf(fp, " ");
279 ret += callchain_node__fprintf_value(chain, fp, total_samples);
280 ret += fprintf(fp, "\n");
281 ret += __callchain__fprintf_flat(fp, chain, total_samples);
282 ret += fprintf(fp, "\n");
283 if (++entries_printed == callchain_param.print_limit)
284 break;
285
286 rb_node = rb_next(rb_node);
287 }
288
289 return ret;
290}
291
292static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
293{
294 const char *sep = symbol_conf.field_sep ?: ";";
295 struct callchain_list *chain;
296 size_t ret = 0;
297 char bf[1024];
298 bool first;
299
300 if (!node)
301 return 0;
302
303 ret += __callchain__fprintf_folded(fp, node->parent);
304
305 first = (ret == 0);
306 list_for_each_entry(chain, &node->val, list) {
307 if (chain->ip >= PERF_CONTEXT_MAX)
308 continue;
309 ret += fprintf(fp, "%s%s", first ? "" : sep,
310 callchain_list__sym_name(chain,
311 bf, sizeof(bf), false));
312 first = false;
313 }
314
315 return ret;
316}
317
318static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
319 u64 total_samples)
320{
321 size_t ret = 0;
322 u32 entries_printed = 0;
323 struct callchain_node *chain;
324 struct rb_node *rb_node = rb_first(tree);
325
326 while (rb_node) {
327
328 chain = rb_entry(rb_node, struct callchain_node, rb_node);
329
330 ret += callchain_node__fprintf_value(chain, fp, total_samples);
331 ret += fprintf(fp, " ");
332 ret += __callchain__fprintf_folded(fp, chain);
333 ret += fprintf(fp, "\n");
334 if (++entries_printed == callchain_param.print_limit)
335 break;
336
337 rb_node = rb_next(rb_node);
338 }
339
340 return ret;
341}
342
343static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
344 u64 total_samples, int left_margin,
345 FILE *fp)
346{
347 u64 parent_samples = he->stat.period;
348
349 if (symbol_conf.cumulate_callchain)
350 parent_samples = he->stat_acc->period;
351
352 switch (callchain_param.mode) {
353 case CHAIN_GRAPH_REL:
354 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
355 parent_samples, left_margin);
356 break;
357 case CHAIN_GRAPH_ABS:
358 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
359 parent_samples, left_margin);
360 break;
361 case CHAIN_FLAT:
362 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
363 break;
364 case CHAIN_FOLDED:
365 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
366 break;
367 case CHAIN_NONE:
368 break;
369 default:
370 pr_err("Bad callchain mode\n");
371 }
372
373 return 0;
374}
375
/*
 * Format one hist entry's columns into hpp->buf using the hists'
 * configured format list.  Entries without a parent are suppressed
 * when exclude_other is set.  Returns the number of characters
 * written into the buffer.
 */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	hists__for_each_format(he->hists, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		/* colored rendering only when supported and available */
		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
412
413static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
414 struct perf_hpp *hpp,
415 struct hists *hists,
416 FILE *fp)
417{
418 const char *sep = symbol_conf.field_sep;
419 struct perf_hpp_fmt *fmt;
420 struct perf_hpp_list_node *fmt_node;
421 char *buf = hpp->buf;
422 size_t size = hpp->size;
423 int ret, printed = 0;
424 bool first = true;
425
426 if (symbol_conf.exclude_other && !he->parent)
427 return 0;
428
429 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
430 advance_hpp(hpp, ret);
431
432 /* the first hpp_list_node is for overhead columns */
433 fmt_node = list_first_entry(&hists->hpp_formats,
434 struct perf_hpp_list_node, list);
435 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
436 /*
437 * If there's no field_sep, we still need
438 * to display initial ' '.
439 */
440 if (!sep || !first) {
441 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
442 advance_hpp(hpp, ret);
443 } else
444 first = false;
445
446 if (perf_hpp__use_color() && fmt->color)
447 ret = fmt->color(fmt, hpp, he);
448 else
449 ret = fmt->entry(fmt, hpp, he);
450
451 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
452 advance_hpp(hpp, ret);
453 }
454
455 if (!sep)
456 ret = scnprintf(hpp->buf, hpp->size, "%*s",
457 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
458 advance_hpp(hpp, ret);
459
460 printed += fprintf(fp, "%s", buf);
461
462 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
463 hpp->buf = buf;
464 hpp->size = size;
465
466 /*
467 * No need to call hist_entry__snprintf_alignment() since this
468 * fmt is always the last column in the hierarchy mode.
469 */
470 if (perf_hpp__use_color() && fmt->color)
471 fmt->color(fmt, hpp, he);
472 else
473 fmt->entry(fmt, hpp, he);
474
475 /*
476 * dynamic entries are right-aligned but we want left-aligned
477 * in the hierarchy mode
478 */
479 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
480 }
481 printed += putc('\n', fp);
482
483 if (symbol_conf.use_callchain && he->leaf) {
484 u64 total = hists__total_period(hists);
485
486 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
487 goto out;
488 }
489
490out:
491 return printed;
492}
493
/*
 * Print a single hist entry to @fp, dispatching to the hierarchy
 * printer when report_hierarchy is enabled.  @bf/@bfsz is the caller's
 * scratch line buffer; a @size of 0 (or larger than the buffer) means
 * "use the whole buffer".  Returns the number of characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists,
			       char *bf, size_t bfsz, FILE *fp)
{
	int ret;
	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
	};
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	/* format the columns into bf, then emit the whole line at once */
	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (symbol_conf.use_callchain)
		ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);

	return ret;
}
520
521static int print_hierarchy_indent(const char *sep, int indent,
522 const char *line, FILE *fp)
523{
524 if (sep != NULL || indent < 2)
525 return 0;
526
527 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
528}
529
/*
 * Print the two header rows for hierarchy mode output: the combined
 * column-name row (overhead columns, then the sort keys joined by
 * " / " and "+") and the dotted underline row sized to the widest
 * hierarchy level.  Returns the number of header rows printed
 * (always 2).
 */
static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
				  const char *sep, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists_to_evsel(hists));
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists_to_evsel(hists));
			rtrim(hpp->buf);

			header_width += fprintf(fp, "%s", ltrim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	/* dotted underline under each overhead column */
	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists_to_evsel(hists));
		fprintf(fp, "%.*s", width, dots);
	}

	/* find the widest hierarchy level to size the sort-key underline */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists_to_evsel(hists));
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
625
/*
 * Print the whole hists tree to @fp: optional column headers (plain or
 * hierarchy style), then one line per unfiltered entry whose percent
 * is >= @min_pcnt, honoring the @max_rows / @max_cols limits.
 * Returns the number of characters printed.
 * NOTE(review): on line-buffer allocation failure this returns
 * (size_t)-1 via 'ret = -1'; callers must treat that as an error, not
 * a character count.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	char bf[96];
	struct perf_hpp dummy_hpp = {
		.buf = bf,
		.size = sizeof(bf),
	};
	bool first = true;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	/* allocate the shared "[...]" symbol for filtered remainders */
	init_rem_hits();

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy) {
		list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
			perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
				perf_hpp__reset_width(fmt, hists);
		}
		nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
		goto print_entries;
	}

	/* first header row: the column names */
	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
		fprintf(fp, "%s", bf);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	first = true;

	/* second header row: dotted underline (only without field_sep) */
	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	/* scratch buffer large enough for one fully decorated line */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* with -v -v, dump the thread's maps for unresolved entries */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
770
771size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
772{
773 int i;
774 size_t ret = 0;
775
776 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
777 const char *name;
778
779 if (stats->nr_events[i] == 0)
780 continue;
781
782 name = perf_event__name(i);
783 if (!strcmp(name, "UNKNOWN"))
784 continue;
785
786 ret += fprintf(fp, "%16s events: %10d\n", name,
787 stats->nr_events[i]);
788 }
789
790 return ret;
791}
1// SPDX-License-Identifier: GPL-2.0
2#include <stdio.h>
3#include <linux/string.h>
4
5#include "../../util/util.h"
6#include "../../util/hist.h"
7#include "../../util/sort.h"
8#include "../../util/evsel.h"
9#include "../../util/srcline.h"
10#include "../../util/string2.h"
11#include "../../util/thread.h"
12#include "../../util/sane_ctype.h"
13
/*
 * Write the blank prefix shifting callchain output right: a single
 * space followed by @left_margin additional spaces.  Returns the
 * count of characters emitted.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	size_t emitted = fprintf(fp, " ");
	int remaining = left_margin;

	while (remaining-- > 0)
		emitted += fprintf(fp, " ");

	return emitted;
}
24
/*
 * Draw one separator row of the graph: left margin, then a pipe
 * segment for every level still open in @depth_mask and blank space
 * otherwise, ending with a newline.  Returns characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t emitted = callchain__fprintf_left_margin(fp, left_margin);
	int level = 0;

	while (level < depth) {
		const char *cell = (depth_mask & (1 << level)) ? "| " : " ";

		emitted += fprintf(fp, "%s", cell);
		level++;
	}

	emitted += fprintf(fp, "\n");

	return emitted;
}
41
42static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
43 struct callchain_list *chain,
44 int depth, int depth_mask, int period,
45 u64 total_samples, int left_margin)
46{
47 int i;
48 size_t ret = 0;
49 char bf[1024], *alloc_str = NULL;
50 char buf[64];
51 const char *str;
52
53 ret += callchain__fprintf_left_margin(fp, left_margin);
54 for (i = 0; i < depth; i++) {
55 if (depth_mask & (1 << i))
56 ret += fprintf(fp, "|");
57 else
58 ret += fprintf(fp, " ");
59 if (!period && i == depth - 1) {
60 ret += fprintf(fp, "--");
61 ret += callchain_node__fprintf_value(node, fp, total_samples);
62 ret += fprintf(fp, "--");
63 } else
64 ret += fprintf(fp, "%s", " ");
65 }
66
67 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
68
69 if (symbol_conf.show_branchflag_count) {
70 callchain_list_counts__printf_value(chain, NULL,
71 buf, sizeof(buf));
72
73 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
74 str = "Not enough memory!";
75 else
76 str = alloc_str;
77 }
78
79 fputs(str, fp);
80 fputc('\n', fp);
81 free(alloc_str);
82
83 return ret;
84}
85
/* Fake symbol/chain entry used to render hits filtered below the percent limit as "[...]". */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
88
89static void init_rem_hits(void)
90{
91 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
92 if (!rem_sq_bracket) {
93 fprintf(stderr, "Not enough memory to display remaining hits\n");
94 return;
95 }
96
97 strcpy(rem_sq_bracket->name, "[...]");
98 rem_hits.ms.sym = rem_sq_bracket;
99}
100
/*
 * Recursively print one level of the callchain graph (ASCII art with
 * '|' pipes).  @depth_mask carries one bit per level telling whether
 * that level's pipe is still open.  In CHAIN_GRAPH_REL mode, hits
 * filtered below the percent limit are summarized as a trailing
 * "[...]" pseudo entry.  Returns the accumulated character count (see
 * ipchain__fprintf_graph() for its accounting caveat).
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 prints the value knob, the rest plain lines */
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* relative mode re-bases percentages on this subtree */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		/* summarize the filtered-out leftovers as "[...]" */
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
189
190/*
191 * If have one single callchain root, don't bother printing
192 * its percentage (100 % in fractal mode and the same percentage
193 * than the hist in graph mode). This also avoid one level of column.
194 *
195 * However when percent-limit applied, it's possible that single callchain
196 * node have different (non-100% in fractal mode) percentage.
197 */
198static bool need_percent_display(struct rb_node *node, u64 parent_samples)
199{
200 struct callchain_node *cnode;
201
202 if (rb_next(node))
203 return true;
204
205 cnode = rb_entry(node, struct callchain_node, rb_node);
206 return callchain_cumul_hits(cnode) != parent_samples;
207}
208
/*
 * Top-level graph printer for one hist entry's sorted callchain.  When
 * a single root needs no percentage of its own (need_percent_display()),
 * its entries are printed inline after a "---" lead-in and the
 * recursion starts at that root's children.  Returns the number of
 * characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				/* lead-in connecting the hist line to the chain */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			/* optionally append branch flag counters */
			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* the single root is done: recurse from its children */
		root = &cnode->rb_root;
	}

	/* relative mode: percentages are against the parent entry */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
273
274static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
275 u64 total_samples)
276{
277 struct callchain_list *chain;
278 size_t ret = 0;
279 char bf[1024];
280
281 if (!node)
282 return 0;
283
284 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
285
286
287 list_for_each_entry(chain, &node->val, list) {
288 if (chain->ip >= PERF_CONTEXT_MAX)
289 continue;
290 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
291 bf, sizeof(bf), false));
292 }
293
294 return ret;
295}
296
297static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
298 u64 total_samples)
299{
300 size_t ret = 0;
301 u32 entries_printed = 0;
302 struct callchain_node *chain;
303 struct rb_node *rb_node = rb_first(tree);
304
305 while (rb_node) {
306 chain = rb_entry(rb_node, struct callchain_node, rb_node);
307
308 ret += fprintf(fp, " ");
309 ret += callchain_node__fprintf_value(chain, fp, total_samples);
310 ret += fprintf(fp, "\n");
311 ret += __callchain__fprintf_flat(fp, chain, total_samples);
312 ret += fprintf(fp, "\n");
313 if (++entries_printed == callchain_param.print_limit)
314 break;
315
316 rb_node = rb_next(rb_node);
317 }
318
319 return ret;
320}
321
322static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
323{
324 const char *sep = symbol_conf.field_sep ?: ";";
325 struct callchain_list *chain;
326 size_t ret = 0;
327 char bf[1024];
328 bool first;
329
330 if (!node)
331 return 0;
332
333 ret += __callchain__fprintf_folded(fp, node->parent);
334
335 first = (ret == 0);
336 list_for_each_entry(chain, &node->val, list) {
337 if (chain->ip >= PERF_CONTEXT_MAX)
338 continue;
339 ret += fprintf(fp, "%s%s", first ? "" : sep,
340 callchain_list__sym_name(chain,
341 bf, sizeof(bf), false));
342 first = false;
343 }
344
345 return ret;
346}
347
348static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
349 u64 total_samples)
350{
351 size_t ret = 0;
352 u32 entries_printed = 0;
353 struct callchain_node *chain;
354 struct rb_node *rb_node = rb_first(tree);
355
356 while (rb_node) {
357
358 chain = rb_entry(rb_node, struct callchain_node, rb_node);
359
360 ret += callchain_node__fprintf_value(chain, fp, total_samples);
361 ret += fprintf(fp, " ");
362 ret += __callchain__fprintf_folded(fp, chain);
363 ret += fprintf(fp, "\n");
364 if (++entries_printed == callchain_param.print_limit)
365 break;
366
367 rb_node = rb_next(rb_node);
368 }
369
370 return ret;
371}
372
373static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
374 u64 total_samples, int left_margin,
375 FILE *fp)
376{
377 u64 parent_samples = he->stat.period;
378
379 if (symbol_conf.cumulate_callchain)
380 parent_samples = he->stat_acc->period;
381
382 switch (callchain_param.mode) {
383 case CHAIN_GRAPH_REL:
384 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
385 parent_samples, left_margin);
386 break;
387 case CHAIN_GRAPH_ABS:
388 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
389 parent_samples, left_margin);
390 break;
391 case CHAIN_FLAT:
392 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
393 break;
394 case CHAIN_FOLDED:
395 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
396 break;
397 case CHAIN_NONE:
398 break;
399 default:
400 pr_err("Bad callchain mode\n");
401 }
402
403 return 0;
404}
405
/*
 * Format one hist entry's columns into hpp->buf using the supplied
 * format list @hpp_list (allows callers to use a list other than the
 * hists' default).  Entries without a parent are suppressed when
 * exclude_other is set.  Returns the number of characters written
 * into the buffer.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		/* colored rendering only when supported and available */
		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
443
/* Convenience wrapper: format @he using its own hists' hpp format list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
448
449static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
450 struct perf_hpp *hpp,
451 struct hists *hists,
452 FILE *fp)
453{
454 const char *sep = symbol_conf.field_sep;
455 struct perf_hpp_fmt *fmt;
456 struct perf_hpp_list_node *fmt_node;
457 char *buf = hpp->buf;
458 size_t size = hpp->size;
459 int ret, printed = 0;
460 bool first = true;
461
462 if (symbol_conf.exclude_other && !he->parent)
463 return 0;
464
465 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
466 advance_hpp(hpp, ret);
467
468 /* the first hpp_list_node is for overhead columns */
469 fmt_node = list_first_entry(&hists->hpp_formats,
470 struct perf_hpp_list_node, list);
471 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
472 /*
473 * If there's no field_sep, we still need
474 * to display initial ' '.
475 */
476 if (!sep || !first) {
477 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
478 advance_hpp(hpp, ret);
479 } else
480 first = false;
481
482 if (perf_hpp__use_color() && fmt->color)
483 ret = fmt->color(fmt, hpp, he);
484 else
485 ret = fmt->entry(fmt, hpp, he);
486
487 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
488 advance_hpp(hpp, ret);
489 }
490
491 if (!sep)
492 ret = scnprintf(hpp->buf, hpp->size, "%*s",
493 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
494 advance_hpp(hpp, ret);
495
496 printed += fprintf(fp, "%s", buf);
497
498 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
499 hpp->buf = buf;
500 hpp->size = size;
501
502 /*
503 * No need to call hist_entry__snprintf_alignment() since this
504 * fmt is always the last column in the hierarchy mode.
505 */
506 if (perf_hpp__use_color() && fmt->color)
507 fmt->color(fmt, hpp, he);
508 else
509 fmt->entry(fmt, hpp, he);
510
511 /*
512 * dynamic entries are right-aligned but we want left-aligned
513 * in the hierarchy mode
514 */
515 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
516 }
517 printed += putc('\n', fp);
518
519 if (symbol_conf.use_callchain && he->leaf) {
520 u64 total = hists__total_period(hists);
521
522 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
523 goto out;
524 }
525
526out:
527 return printed;
528}
529
530static int hist_entry__fprintf(struct hist_entry *he, size_t size,
531 char *bf, size_t bfsz, FILE *fp,
532 bool use_callchain)
533{
534 int ret;
535 int callchain_ret = 0;
536 struct perf_hpp hpp = {
537 .buf = bf,
538 .size = size,
539 };
540 struct hists *hists = he->hists;
541 u64 total_period = hists->stats.total_period;
542
543 if (size == 0 || size > bfsz)
544 size = hpp.size = bfsz;
545
546 if (symbol_conf.report_hierarchy)
547 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
548
549 hist_entry__snprintf(he, &hpp);
550
551 ret = fprintf(fp, "%s\n", bf);
552
553 if (use_callchain)
554 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
555 0, fp);
556
557 ret += callchain_ret;
558
559 return ret;
560}
561
562static int print_hierarchy_indent(const char *sep, int indent,
563 const char *line, FILE *fp)
564{
565 if (sep != NULL || indent < 2)
566 return 0;
567
568 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
569}
570
/*
 * Print the two header lines used in hierarchy (--hierarchy) mode:
 * first the column titles (overhead columns, then the sort keys joined
 * with " / " and "+"), then a matching line of dots.  Returns the
 * number of header lines printed (always 2).
 *
 * NOTE(review): 'spaces' and 'dots' are file-level padding strings
 * defined outside this chunk — presumably long constant buffers that
 * "%.*s"/"%-.*s" truncate to the wanted width; confirm in the full file.
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			/* columns within one hierarchy level are joined by '+' */
			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		/* underline each overhead column with dots of its width */
		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/*
	 * Compute the widest sort-key row so the trailing dotted line
	 * covers every hierarchy level; nothing is printed in this loop.
	 */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	/* two header lines were printed */
	return 2;
}
666
667static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
668 int line, FILE *fp)
669{
670 struct perf_hpp_fmt *fmt;
671 const char *sep = symbol_conf.field_sep;
672 bool first = true;
673 int span = 0;
674
675 hists__for_each_format(hists, fmt) {
676 if (perf_hpp__should_skip(fmt, hists))
677 continue;
678
679 if (!first && !span)
680 fprintf(fp, "%s", sep ?: " ");
681 else
682 first = false;
683
684 fmt->header(fmt, hpp, hists, line, &span);
685
686 if (!span)
687 fprintf(fp, "%s", hpp->buf);
688 }
689}
690
691static int
692hists__fprintf_standard_headers(struct hists *hists,
693 struct perf_hpp *hpp,
694 FILE *fp)
695{
696 struct perf_hpp_list *hpp_list = hists->hpp_list;
697 struct perf_hpp_fmt *fmt;
698 unsigned int width;
699 const char *sep = symbol_conf.field_sep;
700 bool first = true;
701 int line;
702
703 for (line = 0; line < hpp_list->nr_header_lines; line++) {
704 /* first # is displayed one level up */
705 if (line)
706 fprintf(fp, "# ");
707 fprintf_line(hists, hpp, line, fp);
708 fprintf(fp, "\n");
709 }
710
711 if (sep)
712 return hpp_list->nr_header_lines;
713
714 first = true;
715
716 fprintf(fp, "# ");
717
718 hists__for_each_format(hists, fmt) {
719 unsigned int i;
720
721 if (perf_hpp__should_skip(fmt, hists))
722 continue;
723
724 if (!first)
725 fprintf(fp, "%s", sep ?: " ");
726 else
727 first = false;
728
729 width = fmt->width(fmt, hpp, hists);
730 for (i = 0; i < width; i++)
731 fprintf(fp, ".");
732 }
733
734 fprintf(fp, "\n");
735 fprintf(fp, "#\n");
736 return hpp_list->nr_header_lines + 2;
737}
738
739int hists__fprintf_headers(struct hists *hists, FILE *fp)
740{
741 char bf[1024];
742 struct perf_hpp dummy_hpp = {
743 .buf = bf,
744 .size = sizeof(bf),
745 };
746
747 fprintf(fp, "# ");
748
749 if (symbol_conf.report_hierarchy)
750 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
751 else
752 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
753
754}
755
/*
 * Print all entries of @hists to @fp, optionally preceded by the header
 * block.  Rows are cut off at @max_rows (0 = unlimited), each line at
 * @max_cols, and entries below @min_pcnt are skipped.
 *
 * Returns the total number of characters printed; on allocation failure
 * it returns (size_t)-1 — callers comparing against 0 should beware the
 * unsigned wrap.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	/* allocate the shared "[...]" pseudo symbol for truncated chains */
	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* scratch line buffer: sort width + slack + room for color escapes */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;	/* becomes (size_t)-1 for the caller */
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	/* HMD_FORCE_CHILD walks into children in hierarchy mode; for a
	 * flat rb-tree it presumably degenerates to rb_next — confirm */
	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* verbose debugging aid: dump the thread's maps when the
		 * entry could not be resolved to any map */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	/* release the "[...]" pseudo symbol allocated by init_rem_hits() */
	zfree(&rem_sq_bracket);

	return ret;
}
834
835size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
836{
837 int i;
838 size_t ret = 0;
839
840 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
841 const char *name;
842
843 name = perf_event__name(i);
844 if (!strcmp(name, "UNKNOWN"))
845 continue;
846
847 ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
848 }
849
850 return ret;
851}