v5.4 (tools/perf/ui/stdio/hist.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdio.h>
  3#include <stdlib.h>
  4#include <linux/string.h>
  5
  6#include "../../util/callchain.h"
  7#include "../../util/debug.h"
  8#include "../../util/event.h"
  9#include "../../util/hist.h"
 10#include "../../util/map.h"
 11#include "../../util/map_groups.h"
 12#include "../../util/symbol.h"
 13#include "../../util/sort.h"
 14#include "../../util/evsel.h"
 15#include "../../util/srcline.h"
 16#include "../../util/string2.h"
 17#include "../../util/thread.h"
 18#include <linux/ctype.h>
 19#include <linux/zalloc.h>
 20
 21static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
 22{
 23	int i;
 24	int ret = fprintf(fp, "            ");
 25
 26	for (i = 0; i < left_margin; i++)
 27		ret += fprintf(fp, " ");
 28
 29	return ret;
 30}
 31
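/*
 * Emit one separator line of the call graph: each depth level occupies an
 * 11-character column, printed as a '|' plus padding when that level is
 * still open in depth_mask, and as blanks otherwise.
 */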
 32static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
 33					  int left_margin)
 34{
 35	int i;
 36	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
 37
 38	for (i = 0; i < depth; i++)
 39		if (depth_mask & (1 << i))
 40			ret += fprintf(fp, "|          ");
 41		else
 42			ret += fprintf(fp, "           ");
 43
 44	ret += fprintf(fp, "\n");
 45
 46	return ret;
 47}
 48
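/*
 * Print a single callchain_list entry: the '|' columns selected by
 * depth_mask, a '--<value>--' connector for the first entry of a node
 * (period == 0), then the resolved symbol name, optionally followed by
 * branch flag counts.
 */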
 49static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
 50				     struct callchain_list *chain,
 51				     int depth, int depth_mask, int period,
 52				     u64 total_samples, int left_margin)
 53{
 54	int i;
 55	size_t ret = 0;
 56	char bf[1024], *alloc_str = NULL;
 57	char buf[64];
 58	const char *str;
 59
 60	ret += callchain__fprintf_left_margin(fp, left_margin);
 61	for (i = 0; i < depth; i++) {
 62		if (depth_mask & (1 << i))
 63			ret += fprintf(fp, "|");
 64		else
 65			ret += fprintf(fp, " ");
 66		if (!period && i == depth - 1) {
 67			ret += fprintf(fp, "--");
 68			ret += callchain_node__fprintf_value(node, fp, total_samples);
 69			ret += fprintf(fp, "--");
 70		} else
 71			ret += fprintf(fp, "%s", "          ");
 72	}
 73
 74	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
 75
 76	if (symbol_conf.show_branchflag_count) {
 77		callchain_list_counts__printf_value(chain, NULL,
 78						    buf, sizeof(buf));
 79
 80		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
 81			str = "Not enough memory!";
 82		else
 83			str = alloc_str;
 84	}
 85
 86	fputs(str, fp);
 87	fputc('\n', fp);
 88	free(alloc_str);
 89
 90	return ret;
 91}
 92
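/*
 * Synthetic "[...]" entry used to account for the remaining hits that were
 * filtered out of the graph in relative mode.
 */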
 93static struct symbol *rem_sq_bracket;
 94static struct callchain_list rem_hits;
 95
 96static void init_rem_hits(void)
 97{
 98	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
 99	if (!rem_sq_bracket) {
100		fprintf(stderr, "Not enough memory to display remaining hits\n");
101		return;
102	}
103
104	strcpy(rem_sq_bracket->name, "[...]");
105	rem_hits.ms.sym = rem_sq_bracket;
106}
107
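/*
 * Recursively print the children of 'root'. depth_mask tracks which parent
 * levels still need a '|' column; in CHAIN_GRAPH_REL mode any remaining
 * filtered hits are summarized with the "[...]" entry above.
 */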
108static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
109					 u64 total_samples, int depth,
110					 int depth_mask, int left_margin)
111{
112	struct rb_node *node, *next;
113	struct callchain_node *child = NULL;
114	struct callchain_list *chain;
115	int new_depth_mask = depth_mask;
116	u64 remaining;
117	size_t ret = 0;
118	int i;
119	uint entries_printed = 0;
120	int cumul_count = 0;
121
122	remaining = total_samples;
123
124	node = rb_first(root);
125	while (node) {
126		u64 new_total;
127		u64 cumul;
128
129		child = rb_entry(node, struct callchain_node, rb_node);
130		cumul = callchain_cumul_hits(child);
131		remaining -= cumul;
132		cumul_count += callchain_cumul_counts(child);
133
134		/*
135		 * The depth mask manages the output of the '|' pipes that show
136		 * the depth. We don't want to keep the pipes of the current
137		 * level for the last child of this depth, except if we
138		 * have remaining filtered hits: they will supersede the
139		 * last child.
140		 */
141		next = rb_next(node);
142		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
143			new_depth_mask &= ~(1 << (depth - 1));
144
145		/*
146		 * But we keep the older depth mask for the line separator
147		 * to keep the level link until we reach the last child
148		 */
149		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
150						   left_margin);
151		i = 0;
152		list_for_each_entry(chain, &child->val, list) {
153			ret += ipchain__fprintf_graph(fp, child, chain, depth,
154						      new_depth_mask, i++,
155						      total_samples,
156						      left_margin);
157		}
158
159		if (callchain_param.mode == CHAIN_GRAPH_REL)
160			new_total = child->children_hit;
161		else
162			new_total = total_samples;
163
164		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
165						  depth + 1,
166						  new_depth_mask | (1 << depth),
167						  left_margin);
168		node = next;
169		if (++entries_printed == callchain_param.print_limit)
170			break;
171	}
172
173	if (callchain_param.mode == CHAIN_GRAPH_REL &&
174		remaining && remaining != total_samples) {
175		struct callchain_node rem_node = {
176			.hit = remaining,
177		};
178
179		if (!rem_sq_bracket)
180			return ret;
181
182		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
183			rem_node.count = child->parent->children_count - cumul_count;
184			if (rem_node.count <= 0)
185				return ret;
186		}
187
188		new_depth_mask &= ~(1 << (depth - 1));
189		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
190					      new_depth_mask, 0, total_samples,
191					      left_margin);
192	}
193
194	return ret;
195}
196
197/*
198 * If we have a single callchain root, don't bother printing
199 * its percentage (100% in fractal mode, and the same percentage
200 * as the hist in graph mode). This also avoids one level of column.
201 *
202 * However, when a percent limit is applied, the single callchain
203 * node may have a different (non-100% in fractal mode) percentage.
204 */
205static bool need_percent_display(struct rb_node *node, u64 parent_samples)
206{
207	struct callchain_node *cnode;
208
209	if (rb_next(node))
210		return true;
211
212	cnode = rb_entry(node, struct callchain_node, rb_node);
213	return callchain_cumul_hits(cnode) != parent_samples;
214}
215
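/*
 * Top-level graph printer for one hist entry. When the single root needs no
 * percentage of its own, its entries are printed inline after a '---'
 * connector and the recursion starts from its children instead.
 */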
216static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
217				       u64 total_samples, u64 parent_samples,
218				       int left_margin)
219{
220	struct callchain_node *cnode;
221	struct callchain_list *chain;
222	u32 entries_printed = 0;
223	bool printed = false;
224	struct rb_node *node;
225	int i = 0;
226	int ret = 0;
227	char bf[1024];
228
229	node = rb_first(root);
230	if (node && !need_percent_display(node, parent_samples)) {
231		cnode = rb_entry(node, struct callchain_node, rb_node);
232		list_for_each_entry(chain, &cnode->val, list) {
233			/*
234			 * If we sort by symbol, the first entry is the same as
235			 * the symbol. No need to print it, otherwise it appears
236			 * displayed twice.
237			 */
238			if (!i++ && field_order == NULL &&
239			    sort_order && strstarts(sort_order, "sym"))
240				continue;
241
242			if (!printed) {
243				ret += callchain__fprintf_left_margin(fp, left_margin);
244				ret += fprintf(fp, "|\n");
245				ret += callchain__fprintf_left_margin(fp, left_margin);
246				ret += fprintf(fp, "---");
247				left_margin += 3;
248				printed = true;
249			} else
250				ret += callchain__fprintf_left_margin(fp, left_margin);
251
252			ret += fprintf(fp, "%s",
253				       callchain_list__sym_name(chain, bf,
254								sizeof(bf),
255								false));
256
257			if (symbol_conf.show_branchflag_count)
258				ret += callchain_list_counts__printf_value(
259						chain, fp, NULL, 0);
260			ret += fprintf(fp, "\n");
261
262			if (++entries_printed == callchain_param.print_limit)
263				break;
264		}
265		root = &cnode->rb_root;
266	}
267
268	if (callchain_param.mode == CHAIN_GRAPH_REL)
269		total_samples = parent_samples;
270
271	ret += __callchain__fprintf_graph(fp, root, total_samples,
272					  1, 1, left_margin);
273	if (ret) {
274		/* do not add a blank line if it printed nothing */
275		ret += fprintf(fp, "\n");
276	}
277
278	return ret;
279}
280
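/*
 * Flat mode helper: recurse on the parent first so the chain is printed
 * from the root down, one symbol per line, skipping PERF_CONTEXT_* markers.
 */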
281static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
282					u64 total_samples)
283{
284	struct callchain_list *chain;
285	size_t ret = 0;
286	char bf[1024];
287
288	if (!node)
289		return 0;
290
291	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
292
293
294	list_for_each_entry(chain, &node->val, list) {
295		if (chain->ip >= PERF_CONTEXT_MAX)
296			continue;
297		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
298					bf, sizeof(bf), false));
299	}
300
301	return ret;
302}
303
304static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
305				      u64 total_samples)
306{
307	size_t ret = 0;
308	u32 entries_printed = 0;
309	struct callchain_node *chain;
310	struct rb_node *rb_node = rb_first(tree);
311
312	while (rb_node) {
313		chain = rb_entry(rb_node, struct callchain_node, rb_node);
314
315		ret += fprintf(fp, "           ");
316		ret += callchain_node__fprintf_value(chain, fp, total_samples);
317		ret += fprintf(fp, "\n");
318		ret += __callchain__fprintf_flat(fp, chain, total_samples);
319		ret += fprintf(fp, "\n");
320		if (++entries_printed == callchain_param.print_limit)
321			break;
322
323		rb_node = rb_next(rb_node);
324	}
325
326	return ret;
327}
328
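/*
 * Folded mode helper: join the whole chain on a single line, entries
 * separated by field_sep (';' by default); the caller prefixes the value,
 * giving one collapsed stack per line.
 */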
329static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
330{
331	const char *sep = symbol_conf.field_sep ?: ";";
332	struct callchain_list *chain;
333	size_t ret = 0;
334	char bf[1024];
335	bool first;
336
337	if (!node)
338		return 0;
339
340	ret += __callchain__fprintf_folded(fp, node->parent);
341
342	first = (ret == 0);
343	list_for_each_entry(chain, &node->val, list) {
344		if (chain->ip >= PERF_CONTEXT_MAX)
345			continue;
346		ret += fprintf(fp, "%s%s", first ? "" : sep,
347			       callchain_list__sym_name(chain,
348						bf, sizeof(bf), false));
349		first = false;
350	}
351
352	return ret;
353}
354
355static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
356					u64 total_samples)
357{
358	size_t ret = 0;
359	u32 entries_printed = 0;
360	struct callchain_node *chain;
361	struct rb_node *rb_node = rb_first(tree);
362
363	while (rb_node) {
364
365		chain = rb_entry(rb_node, struct callchain_node, rb_node);
366
367		ret += callchain_node__fprintf_value(chain, fp, total_samples);
368		ret += fprintf(fp, " ");
369		ret += __callchain__fprintf_folded(fp, chain);
370		ret += fprintf(fp, "\n");
371		if (++entries_printed == callchain_param.print_limit)
372			break;
373
374		rb_node = rb_next(rb_node);
375	}
376
377	return ret;
378}
379
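/*
 * Print the callchain of one hist entry in the configured mode. In
 * CHAIN_GRAPH_REL the percentages are relative to this entry's period
 * (the accumulated period when callchain cumulation is enabled).
 */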
380static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
381					    u64 total_samples, int left_margin,
382					    FILE *fp)
383{
384	u64 parent_samples = he->stat.period;
385
386	if (symbol_conf.cumulate_callchain)
387		parent_samples = he->stat_acc->period;
388
389	switch (callchain_param.mode) {
390	case CHAIN_GRAPH_REL:
391		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
392						parent_samples, left_margin);
393		break;
394	case CHAIN_GRAPH_ABS:
395		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
396						parent_samples, left_margin);
397		break;
398	case CHAIN_FLAT:
399		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
400		break;
401	case CHAIN_FOLDED:
402		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
403		break;
404	case CHAIN_NONE:
405		break;
406	default:
407		pr_err("Bad callchain mode\n");
408	}
409
410	return 0;
411}
412
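/*
 * Format one hist entry into hpp->buf using the given format list and
 * return the number of bytes written.
 */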
413int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
414			   struct perf_hpp_list *hpp_list)
415{
416	const char *sep = symbol_conf.field_sep;
417	struct perf_hpp_fmt *fmt;
418	char *start = hpp->buf;
419	int ret;
420	bool first = true;
421
422	if (symbol_conf.exclude_other && !he->parent)
423		return 0;
424
425	perf_hpp_list__for_each_format(hpp_list, fmt) {
426		if (perf_hpp__should_skip(fmt, he->hists))
427			continue;
428
429		/*
430		 * If there's no field_sep, we still need
431		 * to display initial '  '.
432		 */
433		if (!sep || !first) {
434			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
435			advance_hpp(hpp, ret);
436		} else
437			first = false;
438
439		if (perf_hpp__use_color() && fmt->color)
440			ret = fmt->color(fmt, hpp, he);
441		else
442			ret = fmt->entry(fmt, hpp, he);
443
444		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
445		advance_hpp(hpp, ret);
446	}
447
448	return hpp->buf - start;
449}
450
451static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
452{
453	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
454}
455
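/*
 * Hierarchy mode: print one line for this entry (depth indentation, the
 * shared overhead columns, then the entry's own sort column(s) left-aligned),
 * followed by its callchain when it is a leaf.
 */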
456static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
457					 struct perf_hpp *hpp,
458					 struct hists *hists,
459					 FILE *fp)
460{
461	const char *sep = symbol_conf.field_sep;
462	struct perf_hpp_fmt *fmt;
463	struct perf_hpp_list_node *fmt_node;
464	char *buf = hpp->buf;
465	size_t size = hpp->size;
466	int ret, printed = 0;
467	bool first = true;
468
469	if (symbol_conf.exclude_other && !he->parent)
470		return 0;
471
472	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
473	advance_hpp(hpp, ret);
474
475	/* the first hpp_list_node is for overhead columns */
476	fmt_node = list_first_entry(&hists->hpp_formats,
477				    struct perf_hpp_list_node, list);
478	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
479		/*
480		 * If there's no field_sep, we still need
481		 * to display initial '  '.
482		 */
483		if (!sep || !first) {
484			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
485			advance_hpp(hpp, ret);
486		} else
487			first = false;
488
489		if (perf_hpp__use_color() && fmt->color)
490			ret = fmt->color(fmt, hpp, he);
491		else
492			ret = fmt->entry(fmt, hpp, he);
493
494		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
495		advance_hpp(hpp, ret);
496	}
497
498	if (!sep)
499		ret = scnprintf(hpp->buf, hpp->size, "%*s",
500				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
501	advance_hpp(hpp, ret);
502
503	printed += fprintf(fp, "%s", buf);
504
505	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
506		hpp->buf  = buf;
507		hpp->size = size;
508
509		/*
510		 * No need to call hist_entry__snprintf_alignment() since this
511		 * fmt is always the last column in the hierarchy mode.
512		 */
513		if (perf_hpp__use_color() && fmt->color)
514			fmt->color(fmt, hpp, he);
515		else
516			fmt->entry(fmt, hpp, he);
517
518		/*
519		 * dynamic entries are right-aligned but we want left-aligned
520		 * in the hierarchy mode
521		 */
522		printed += fprintf(fp, "%s%s", sep ?: "  ", skip_spaces(buf));
523	}
524	printed += putc('\n', fp);
525
526	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
527		u64 total = hists__total_period(hists);
528
529		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
530		goto out;
531	}
532
533out:
534	return printed;
535}
536
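/*
 * Block mode: iterate over the block histogram attached to this entry and
 * print one formatted line per block, honouring hpp.skip.
 */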
537static int hist_entry__block_fprintf(struct hist_entry *he,
538				     char *bf, size_t size,
539				     FILE *fp)
540{
541	struct block_hist *bh = container_of(he, struct block_hist, he);
542	int ret = 0;
543
544	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
545		struct perf_hpp hpp = {
546			.buf		= bf,
547			.size		= size,
548			.skip		= false,
549		};
550
551		bh->block_idx = i;
552		hist_entry__snprintf(he, &hpp);
553
554		if (!hpp.skip)
555			ret += fprintf(fp, "%s\n", bf);
556	}
557
558	return ret;
559}
560
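/*
 * Print one hist entry. Hierarchy and block output are delegated to the
 * helpers above; otherwise the formatted line is printed, followed by the
 * entry's callchain unless ignore_callchains is set.
 */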
561static int hist_entry__fprintf(struct hist_entry *he, size_t size,
562			       char *bf, size_t bfsz, FILE *fp,
563			       bool ignore_callchains)
564{
565	int ret;
566	int callchain_ret = 0;
567	struct perf_hpp hpp = {
568		.buf		= bf,
569		.size		= size,
570	};
571	struct hists *hists = he->hists;
572	u64 total_period = hists->stats.total_period;
573
574	if (size == 0 || size > bfsz)
575		size = hpp.size = bfsz;
576
577	if (symbol_conf.report_hierarchy)
578		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
579
580	if (symbol_conf.report_block)
581		return hist_entry__block_fprintf(he, bf, size, fp);
582
583	hist_entry__snprintf(he, &hpp);
584
585	ret = fprintf(fp, "%s\n", bf);
586
587	if (hist_entry__has_callchains(he) && !ignore_callchains)
588		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
589							      0, fp);
590
591	ret += callchain_ret;
592
593	return ret;
594}
595
596static int print_hierarchy_indent(const char *sep, int indent,
597				  const char *line, FILE *fp)
598{
599	int width;
600
601	if (sep != NULL || indent < 2)
602		return 0;
603
604	width = (indent - 2) * HIERARCHY_INDENT;
605
606	return fprintf(fp, "%-*.*s", width, width, line);
607}
608
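/*
 * Hierarchy mode headers: one line with the overhead columns and the sort
 * keys joined by ' / ', then a line of dots. Returns the number of header
 * lines printed (2).
 */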
609static int hists__fprintf_hierarchy_headers(struct hists *hists,
610					    struct perf_hpp *hpp, FILE *fp)
611{
612	bool first_node, first_col;
613	int indent;
614	int depth;
615	unsigned width = 0;
616	unsigned header_width = 0;
617	struct perf_hpp_fmt *fmt;
618	struct perf_hpp_list_node *fmt_node;
619	const char *sep = symbol_conf.field_sep;
620
621	indent = hists->nr_hpp_node;
622
623	/* preserve max indent depth for column headers */
624	print_hierarchy_indent(sep, indent, " ", fp);
625
626	/* the first hpp_list_node is for overhead columns */
627	fmt_node = list_first_entry(&hists->hpp_formats,
628				    struct perf_hpp_list_node, list);
629
630	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
631		fmt->header(fmt, hpp, hists, 0, NULL);
632		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
633	}
634
635	/* combine sort headers with ' / ' */
636	first_node = true;
637	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
638		if (!first_node)
639			header_width += fprintf(fp, " / ");
640		first_node = false;
641
642		first_col = true;
643		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
644			if (perf_hpp__should_skip(fmt, hists))
645				continue;
646
647			if (!first_col)
648				header_width += fprintf(fp, "+");
649			first_col = false;
650
651			fmt->header(fmt, hpp, hists, 0, NULL);
652
653			header_width += fprintf(fp, "%s", strim(hpp->buf));
654		}
655	}
656
657	fprintf(fp, "\n# ");
658
659	/* preserve max indent depth for initial dots */
660	print_hierarchy_indent(sep, indent, dots, fp);
661
662	/* the first hpp_list_node is for overhead columns */
663	fmt_node = list_first_entry(&hists->hpp_formats,
664				    struct perf_hpp_list_node, list);
665
666	first_col = true;
667	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
668		if (!first_col)
669			fprintf(fp, "%s", sep ?: "..");
670		first_col = false;
671
672		width = fmt->width(fmt, hpp, hists);
673		fprintf(fp, "%.*s", width, dots);
674	}
675
676	depth = 0;
677	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
678		first_col = true;
679		width = depth * HIERARCHY_INDENT;
680
681		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
682			if (perf_hpp__should_skip(fmt, hists))
683				continue;
684
685			if (!first_col)
686				width++;  /* for '+' sign between column header */
687			first_col = false;
688
689			width += fmt->width(fmt, hpp, hists);
690		}
691
692		if (width > header_width)
693			header_width = width;
694
695		depth++;
696	}
697
698	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
699
700	fprintf(fp, "\n#\n");
701
702	return 2;
703}
704
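/*
 * Print one header line; while a format reports a non-zero span, both the
 * column separator and the header text are suppressed.
 */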
705static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
706			 int line, FILE *fp)
707{
708	struct perf_hpp_fmt *fmt;
709	const char *sep = symbol_conf.field_sep;
710	bool first = true;
711	int span = 0;
712
713	hists__for_each_format(hists, fmt) {
714		if (perf_hpp__should_skip(fmt, hists))
715			continue;
716
717		if (!first && !span)
718			fprintf(fp, "%s", sep ?: "  ");
719		else
720			first = false;
721
722		fmt->header(fmt, hpp, hists, line, &span);
723
724		if (!span)
725			fprintf(fp, "%s", hpp->buf);
726	}
727}
728
729static int
730hists__fprintf_standard_headers(struct hists *hists,
731				struct perf_hpp *hpp,
732				FILE *fp)
733{
734	struct perf_hpp_list *hpp_list = hists->hpp_list;
735	struct perf_hpp_fmt *fmt;
736	unsigned int width;
737	const char *sep = symbol_conf.field_sep;
738	bool first = true;
739	int line;
740
741	for (line = 0; line < hpp_list->nr_header_lines; line++) {
742		/* first # is displayed one level up */
743		if (line)
744			fprintf(fp, "# ");
745		fprintf_line(hists, hpp, line, fp);
746		fprintf(fp, "\n");
747	}
748
749	if (sep)
750		return hpp_list->nr_header_lines;
751
752	first = true;
753
754	fprintf(fp, "# ");
755
756	hists__for_each_format(hists, fmt) {
757		unsigned int i;
758
759		if (perf_hpp__should_skip(fmt, hists))
760			continue;
761
762		if (!first)
763			fprintf(fp, "%s", sep ?: "  ");
764		else
765			first = false;
766
767		width = fmt->width(fmt, hpp, hists);
768		for (i = 0; i < width; i++)
769			fprintf(fp, ".");
770	}
771
772	fprintf(fp, "\n");
773	fprintf(fp, "#\n");
774	return hpp_list->nr_header_lines + 2;
775}
776
777int hists__fprintf_headers(struct hists *hists, FILE *fp)
778{
779	char bf[1024];
780	struct perf_hpp dummy_hpp = {
781		.buf	= bf,
782		.size	= sizeof(bf),
783	};
784
785	fprintf(fp, "# ");
786
787	if (symbol_conf.report_hierarchy)
788		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
789	else
790		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
791
792}
793
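/*
 * Print the whole hists tree: optional headers, then one line per
 * unfiltered entry whose percentage is at least min_pcnt, stopping after
 * max_rows rows. Returns the number of characters written, or -1 if the
 * line buffer cannot be allocated.
 */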
794size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
795		      int max_cols, float min_pcnt, FILE *fp,
796		      bool ignore_callchains)
797{
798	struct rb_node *nd;
799	size_t ret = 0;
800	const char *sep = symbol_conf.field_sep;
801	int nr_rows = 0;
802	size_t linesz;
803	char *line = NULL;
804	unsigned indent;
805
806	init_rem_hits();
807
808	hists__reset_column_width(hists);
809
810	if (symbol_conf.col_width_list_str)
811		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
812
813	if (show_header)
814		nr_rows += hists__fprintf_headers(hists, fp);
815
816	if (max_rows && nr_rows >= max_rows)
817		goto out;
818
819	linesz = hists__sort_list_width(hists) + 3 + 1;
820	linesz += perf_hpp__color_overhead();
821	line = malloc(linesz);
822	if (line == NULL) {
823		ret = -1;
824		goto out;
825	}
826
827	indent = hists__overhead_width(hists) + 4;
828
829	for (nd = rb_first_cached(&hists->entries); nd;
830	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
831		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
832		float percent;
833
834		if (h->filtered)
835			continue;
836
837		percent = hist_entry__get_percent_limit(h);
838		if (percent < min_pcnt)
839			continue;
840
841		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
842
843		if (max_rows && ++nr_rows >= max_rows)
844			break;
845
846		/*
847		 * If all children are filtered out or percent-limited,
848		 * display "no entry >= x.xx%" message.
849		 */
850		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
851			int depth = hists->nr_hpp_node + h->depth + 1;
852
853			print_hierarchy_indent(sep, depth, " ", fp);
854			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
855
856			if (max_rows && ++nr_rows >= max_rows)
857				break;
858		}
859
860		if (h->ms.map == NULL && verbose > 1) {
861		map_groups__fprintf(h->thread->mg, fp);
862			fprintf(fp, "%.10s end\n", graph_dotted_line);
863		}
864	}
865
866	free(line);
867out:
868	zfree(&rem_sq_bracket);
869
870	return ret;
871}
872
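/*
 * Print how many events of each PERF_RECORD_* type were seen, skipping
 * types perf_event__name() reports as "UNKNOWN".
 */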
873size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
874{
875	int i;
876	size_t ret = 0;
877
878	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
879		const char *name;
880
881		name = perf_event__name(i);
882		if (!strcmp(name, "UNKNOWN"))
883			continue;
884
885		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
886	}
887
888	return ret;
889}
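For context, here is a minimal caller sketch (hypothetical, not taken from the kernel tree) showing how a report-style tool could drive the two public entry points defined above, using the signatures as they appear in this version; report__output is an invented name for illustration only.

static void report__output(struct hists *hists, struct events_stats *stats)
{
	/* hypothetical caller: headers on, no row/column limits, no percent limit, callchains kept */
	hists__fprintf(hists, true, 0, 0, 0.0f, stdout, false);

	/* summary of the PERF_RECORD_* counts gathered while processing */
	events_stats__fprintf(stats, stdout);
}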
v4.6 (tools/perf/ui/stdio/hist.c)
  1#include <stdio.h>
  2
  3#include "../../util/util.h"
  4#include "../../util/hist.h"
  5#include "../../util/sort.h"
  6#include "../../util/evsel.h"
  7
  8
  9static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
 10{
 11	int i;
 12	int ret = fprintf(fp, "            ");
 13
 14	for (i = 0; i < left_margin; i++)
 15		ret += fprintf(fp, " ");
 16
 17	return ret;
 18}
 19
 20static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
 21					  int left_margin)
 22{
 23	int i;
 24	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
 25
 26	for (i = 0; i < depth; i++)
 27		if (depth_mask & (1 << i))
 28			ret += fprintf(fp, "|          ");
 29		else
 30			ret += fprintf(fp, "           ");
 31
 32	ret += fprintf(fp, "\n");
 33
 34	return ret;
 35}
 36
 37static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
 38				     struct callchain_list *chain,
 39				     int depth, int depth_mask, int period,
 40				     u64 total_samples, int left_margin)
 41{
 42	int i;
 43	size_t ret = 0;
 44	char bf[1024];
 45
 46	ret += callchain__fprintf_left_margin(fp, left_margin);
 47	for (i = 0; i < depth; i++) {
 48		if (depth_mask & (1 << i))
 49			ret += fprintf(fp, "|");
 50		else
 51			ret += fprintf(fp, " ");
 52		if (!period && i == depth - 1) {
 53			ret += fprintf(fp, "--");
 54			ret += callchain_node__fprintf_value(node, fp, total_samples);
 55			ret += fprintf(fp, "--");
 56		} else
 57			ret += fprintf(fp, "%s", "          ");
 58	}
 59	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
 60	fputc('\n', fp);
 61	return ret;
 62}
 63
 64static struct symbol *rem_sq_bracket;
 65static struct callchain_list rem_hits;
 66
 67static void init_rem_hits(void)
 68{
 69	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
 70	if (!rem_sq_bracket) {
 71		fprintf(stderr, "Not enough memory to display remaining hits\n");
 72		return;
 73	}
 74
 75	strcpy(rem_sq_bracket->name, "[...]");
 76	rem_hits.ms.sym = rem_sq_bracket;
 77}
 78
 79static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 80					 u64 total_samples, int depth,
 81					 int depth_mask, int left_margin)
 82{
 83	struct rb_node *node, *next;
 84	struct callchain_node *child = NULL;
 85	struct callchain_list *chain;
 86	int new_depth_mask = depth_mask;
 87	u64 remaining;
 88	size_t ret = 0;
 89	int i;
 90	uint entries_printed = 0;
 91	int cumul_count = 0;
 92
 93	remaining = total_samples;
 94
 95	node = rb_first(root);
 96	while (node) {
 97		u64 new_total;
 98		u64 cumul;
 99
100		child = rb_entry(node, struct callchain_node, rb_node);
101		cumul = callchain_cumul_hits(child);
102		remaining -= cumul;
103		cumul_count += callchain_cumul_counts(child);
104
105		/*
106		 * The depth mask manages the output of the '|' pipes that show
107		 * the depth. We don't want to keep the pipes of the current
108		 * level for the last child of this depth, except if we
109		 * have remaining filtered hits: they will supersede the
110		 * last child.
111		 */
112		next = rb_next(node);
113		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
114			new_depth_mask &= ~(1 << (depth - 1));
115
116		/*
117		 * But we keep the older depth mask for the line separator
118		 * to keep the level link until we reach the last child
119		 */
120		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
121						   left_margin);
122		i = 0;
123		list_for_each_entry(chain, &child->val, list) {
124			ret += ipchain__fprintf_graph(fp, child, chain, depth,
125						      new_depth_mask, i++,
126						      total_samples,
127						      left_margin);
128		}
129
130		if (callchain_param.mode == CHAIN_GRAPH_REL)
131			new_total = child->children_hit;
132		else
133			new_total = total_samples;
134
135		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
136						  depth + 1,
137						  new_depth_mask | (1 << depth),
138						  left_margin);
139		node = next;
140		if (++entries_printed == callchain_param.print_limit)
141			break;
142	}
143
144	if (callchain_param.mode == CHAIN_GRAPH_REL &&
145		remaining && remaining != total_samples) {
146		struct callchain_node rem_node = {
147			.hit = remaining,
148		};
149
150		if (!rem_sq_bracket)
151			return ret;
152
153		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
154			rem_node.count = child->parent->children_count - cumul_count;
155			if (rem_node.count <= 0)
156				return ret;
157		}
158
159		new_depth_mask &= ~(1 << (depth - 1));
160		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
161					      new_depth_mask, 0, total_samples,
162					      left_margin);
163	}
164
165	return ret;
166}
167
168/*
169 * If we have a single callchain root, don't bother printing
170 * its percentage (100% in fractal mode, and the same percentage
171 * as the hist in graph mode). This also avoids one level of column.
172 *
173 * However, when a percent limit is applied, the single callchain
174 * node may have a different (non-100% in fractal mode) percentage.
175 */
176static bool need_percent_display(struct rb_node *node, u64 parent_samples)
177{
178	struct callchain_node *cnode;
179
180	if (rb_next(node))
181		return true;
182
183	cnode = rb_entry(node, struct callchain_node, rb_node);
184	return callchain_cumul_hits(cnode) != parent_samples;
185}
186
187static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
188				       u64 total_samples, u64 parent_samples,
189				       int left_margin)
190{
191	struct callchain_node *cnode;
192	struct callchain_list *chain;
193	u32 entries_printed = 0;
194	bool printed = false;
195	struct rb_node *node;
196	int i = 0;
197	int ret = 0;
198	char bf[1024];
199
200	node = rb_first(root);
201	if (node && !need_percent_display(node, parent_samples)) {
202		cnode = rb_entry(node, struct callchain_node, rb_node);
203		list_for_each_entry(chain, &cnode->val, list) {
204			/*
205			 * If we sort by symbol, the first entry is the same as
206			 * the symbol. No need to print it, otherwise it appears
207			 * displayed twice.
208			 */
209			if (!i++ && field_order == NULL &&
210			    sort_order && !prefixcmp(sort_order, "sym"))
211				continue;
212			if (!printed) {
213				ret += callchain__fprintf_left_margin(fp, left_margin);
214				ret += fprintf(fp, "|\n");
215				ret += callchain__fprintf_left_margin(fp, left_margin);
216				ret += fprintf(fp, "---");
217				left_margin += 3;
218				printed = true;
219			} else
220				ret += callchain__fprintf_left_margin(fp, left_margin);
221
222			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
223							false));
224
225			if (++entries_printed == callchain_param.print_limit)
226				break;
227		}
228		root = &cnode->rb_root;
229	}
230
231	if (callchain_param.mode == CHAIN_GRAPH_REL)
232		total_samples = parent_samples;
233
234	ret += __callchain__fprintf_graph(fp, root, total_samples,
235					  1, 1, left_margin);
236	if (ret) {
237		/* do not add a blank line if it printed nothing */
238		ret += fprintf(fp, "\n");
239	}
240
241	return ret;
242}
243
244static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
245					u64 total_samples)
246{
247	struct callchain_list *chain;
248	size_t ret = 0;
249	char bf[1024];
250
251	if (!node)
252		return 0;
253
254	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
255
256
257	list_for_each_entry(chain, &node->val, list) {
258		if (chain->ip >= PERF_CONTEXT_MAX)
259			continue;
260		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
261					bf, sizeof(bf), false));
262	}
263
264	return ret;
265}
266
267static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
268				      u64 total_samples)
269{
270	size_t ret = 0;
271	u32 entries_printed = 0;
272	struct callchain_node *chain;
273	struct rb_node *rb_node = rb_first(tree);
274
275	while (rb_node) {
276		chain = rb_entry(rb_node, struct callchain_node, rb_node);
277
278		ret += fprintf(fp, "           ");
279		ret += callchain_node__fprintf_value(chain, fp, total_samples);
280		ret += fprintf(fp, "\n");
281		ret += __callchain__fprintf_flat(fp, chain, total_samples);
282		ret += fprintf(fp, "\n");
283		if (++entries_printed == callchain_param.print_limit)
284			break;
285
286		rb_node = rb_next(rb_node);
287	}
288
289	return ret;
290}
291
292static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
293{
294	const char *sep = symbol_conf.field_sep ?: ";";
295	struct callchain_list *chain;
296	size_t ret = 0;
297	char bf[1024];
298	bool first;
299
300	if (!node)
301		return 0;
302
303	ret += __callchain__fprintf_folded(fp, node->parent);
304
305	first = (ret == 0);
306	list_for_each_entry(chain, &node->val, list) {
307		if (chain->ip >= PERF_CONTEXT_MAX)
308			continue;
309		ret += fprintf(fp, "%s%s", first ? "" : sep,
310			       callchain_list__sym_name(chain,
311						bf, sizeof(bf), false));
312		first = false;
313	}
314
315	return ret;
316}
317
318static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
319					u64 total_samples)
320{
321	size_t ret = 0;
322	u32 entries_printed = 0;
323	struct callchain_node *chain;
324	struct rb_node *rb_node = rb_first(tree);
325
326	while (rb_node) {
327
328		chain = rb_entry(rb_node, struct callchain_node, rb_node);
329
330		ret += callchain_node__fprintf_value(chain, fp, total_samples);
331		ret += fprintf(fp, " ");
332		ret += __callchain__fprintf_folded(fp, chain);
333		ret += fprintf(fp, "\n");
334		if (++entries_printed == callchain_param.print_limit)
335			break;
336
337		rb_node = rb_next(rb_node);
338	}
339
340	return ret;
341}
342
343static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
344					    u64 total_samples, int left_margin,
345					    FILE *fp)
346{
347	u64 parent_samples = he->stat.period;
348
349	if (symbol_conf.cumulate_callchain)
350		parent_samples = he->stat_acc->period;
351
352	switch (callchain_param.mode) {
353	case CHAIN_GRAPH_REL:
354		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
355						parent_samples, left_margin);
356		break;
357	case CHAIN_GRAPH_ABS:
358		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
359						parent_samples, left_margin);
360		break;
361	case CHAIN_FLAT:
362		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
363		break;
364	case CHAIN_FOLDED:
365		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
366		break;
367	case CHAIN_NONE:
368		break;
369	default:
370		pr_err("Bad callchain mode\n");
371	}
372
373	return 0;
374}
375
376static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
377{
378	const char *sep = symbol_conf.field_sep;
379	struct perf_hpp_fmt *fmt;
380	char *start = hpp->buf;
381	int ret;
382	bool first = true;
383
384	if (symbol_conf.exclude_other && !he->parent)
385		return 0;
386
387	hists__for_each_format(he->hists, fmt) {
388		if (perf_hpp__should_skip(fmt, he->hists))
389			continue;
390
391		/*
392		 * If there's no field_sep, we still need
393		 * to display initial '  '.
394		 */
395		if (!sep || !first) {
396			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
397			advance_hpp(hpp, ret);
398		} else
399			first = false;
400
401		if (perf_hpp__use_color() && fmt->color)
402			ret = fmt->color(fmt, hpp, he);
403		else
404			ret = fmt->entry(fmt, hpp, he);
405
406		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
407		advance_hpp(hpp, ret);
408	}
409
410	return hpp->buf - start;
411}
412
413static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
414					 struct perf_hpp *hpp,
415					 struct hists *hists,
416					 FILE *fp)
417{
418	const char *sep = symbol_conf.field_sep;
419	struct perf_hpp_fmt *fmt;
420	struct perf_hpp_list_node *fmt_node;
421	char *buf = hpp->buf;
422	size_t size = hpp->size;
423	int ret, printed = 0;
424	bool first = true;
425
426	if (symbol_conf.exclude_other && !he->parent)
427		return 0;
428
429	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
430	advance_hpp(hpp, ret);
431
432	/* the first hpp_list_node is for overhead columns */
433	fmt_node = list_first_entry(&hists->hpp_formats,
434				    struct perf_hpp_list_node, list);
435	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
436		/*
437		 * If there's no field_sep, we still need
438		 * to display initial '  '.
439		 */
440		if (!sep || !first) {
441			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
442			advance_hpp(hpp, ret);
443		} else
444			first = false;
445
446		if (perf_hpp__use_color() && fmt->color)
447			ret = fmt->color(fmt, hpp, he);
448		else
449			ret = fmt->entry(fmt, hpp, he);
450
451		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
452		advance_hpp(hpp, ret);
453	}
454
455	if (!sep)
456		ret = scnprintf(hpp->buf, hpp->size, "%*s",
457				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
458	advance_hpp(hpp, ret);
459
460	printed += fprintf(fp, "%s", buf);
461
462	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
463		hpp->buf  = buf;
464		hpp->size = size;
465
466		/*
467		 * No need to call hist_entry__snprintf_alignment() since this
468		 * fmt is always the last column in the hierarchy mode.
469		 */
470		if (perf_hpp__use_color() && fmt->color)
471			fmt->color(fmt, hpp, he);
472		else
473			fmt->entry(fmt, hpp, he);
474
475		/*
476		 * dynamic entries are right-aligned but we want left-aligned
477		 * in the hierarchy mode
478		 */
479		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
480	}
481	printed += putc('\n', fp);
482
483	if (symbol_conf.use_callchain && he->leaf) {
484		u64 total = hists__total_period(hists);
485
486		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
487		goto out;
488	}
489
490out:
491	return printed;
492}
493
494static int hist_entry__fprintf(struct hist_entry *he, size_t size,
495			       struct hists *hists,
496			       char *bf, size_t bfsz, FILE *fp)
497{
498	int ret;
499	struct perf_hpp hpp = {
500		.buf		= bf,
501		.size		= size,
502	};
503	u64 total_period = hists->stats.total_period;
504
505	if (size == 0 || size > bfsz)
506		size = hpp.size = bfsz;
507
508	if (symbol_conf.report_hierarchy)
509		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
510
511	hist_entry__snprintf(he, &hpp);
512
513	ret = fprintf(fp, "%s\n", bf);
514
515	if (symbol_conf.use_callchain)
516		ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
517
518	return ret;
519}
520
521static int print_hierarchy_indent(const char *sep, int indent,
522				  const char *line, FILE *fp)
523{
524	if (sep != NULL || indent < 2)
525		return 0;
526
527	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
528}
529
530static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
531				  const char *sep, FILE *fp)
532{
533	bool first_node, first_col;
534	int indent;
535	int depth;
536	unsigned width = 0;
537	unsigned header_width = 0;
538	struct perf_hpp_fmt *fmt;
539	struct perf_hpp_list_node *fmt_node;
540
541	indent = hists->nr_hpp_node;
542
543	/* preserve max indent depth for column headers */
544	print_hierarchy_indent(sep, indent, spaces, fp);
545
546	/* the first hpp_list_node is for overhead columns */
547	fmt_node = list_first_entry(&hists->hpp_formats,
548				    struct perf_hpp_list_node, list);
549
550	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
551		fmt->header(fmt, hpp, hists_to_evsel(hists));
552		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
553	}
554
555	/* combine sort headers with ' / ' */
556	first_node = true;
557	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
558		if (!first_node)
559			header_width += fprintf(fp, " / ");
560		first_node = false;
561
562		first_col = true;
563		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
564			if (perf_hpp__should_skip(fmt, hists))
565				continue;
566
567			if (!first_col)
568				header_width += fprintf(fp, "+");
569			first_col = false;
570
571			fmt->header(fmt, hpp, hists_to_evsel(hists));
572			rtrim(hpp->buf);
573
574			header_width += fprintf(fp, "%s", ltrim(hpp->buf));
575		}
576	}
577
578	fprintf(fp, "\n# ");
579
580	/* preserve max indent depth for initial dots */
581	print_hierarchy_indent(sep, indent, dots, fp);
582
583	/* the first hpp_list_node is for overhead columns */
584	fmt_node = list_first_entry(&hists->hpp_formats,
585				    struct perf_hpp_list_node, list);
586
587	first_col = true;
588	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
589		if (!first_col)
590			fprintf(fp, "%s", sep ?: "..");
591		first_col = false;
592
593		width = fmt->width(fmt, hpp, hists_to_evsel(hists));
594		fprintf(fp, "%.*s", width, dots);
595	}
596
597	depth = 0;
598	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
599		first_col = true;
600		width = depth * HIERARCHY_INDENT;
601
602		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
603			if (perf_hpp__should_skip(fmt, hists))
604				continue;
605
606			if (!first_col)
607				width++;  /* for '+' sign between column header */
608			first_col = false;
609
610			width += fmt->width(fmt, hpp, hists_to_evsel(hists));
611		}
612
613		if (width > header_width)
614			header_width = width;
615
616		depth++;
617	}
618
619	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
620
621	fprintf(fp, "\n#\n");
622
623	return 2;
624}
625
626size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
627		      int max_cols, float min_pcnt, FILE *fp)
628{
629	struct perf_hpp_fmt *fmt;
630	struct perf_hpp_list_node *fmt_node;
631	struct rb_node *nd;
632	size_t ret = 0;
633	unsigned int width;
634	const char *sep = symbol_conf.field_sep;
635	int nr_rows = 0;
636	char bf[96];
637	struct perf_hpp dummy_hpp = {
638		.buf	= bf,
639		.size	= sizeof(bf),
640	};
641	bool first = true;
642	size_t linesz;
643	char *line = NULL;
644	unsigned indent;
645
646	init_rem_hits();
647
648	hists__for_each_format(hists, fmt)
649		perf_hpp__reset_width(fmt, hists);
650
651	if (symbol_conf.col_width_list_str)
652		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
653
654	if (!show_header)
655		goto print_entries;
656
657	fprintf(fp, "# ");
658
659	if (symbol_conf.report_hierarchy) {
660		list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
661			perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
662				perf_hpp__reset_width(fmt, hists);
663		}
664		nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
665		goto print_entries;
666	}
667
668	hists__for_each_format(hists, fmt) {
669		if (perf_hpp__should_skip(fmt, hists))
670			continue;
671
672		if (!first)
673			fprintf(fp, "%s", sep ?: "  ");
674		else
675			first = false;
676
677		fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
678		fprintf(fp, "%s", bf);
679	}
680
681	fprintf(fp, "\n");
682	if (max_rows && ++nr_rows >= max_rows)
683		goto out;
684
685	if (sep)
686		goto print_entries;
687
688	first = true;
689
690	fprintf(fp, "# ");
691
692	hists__for_each_format(hists, fmt) {
693		unsigned int i;
694
695		if (perf_hpp__should_skip(fmt, hists))
696			continue;
697
698		if (!first)
699			fprintf(fp, "%s", sep ?: "  ");
700		else
701			first = false;
702
703		width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
704		for (i = 0; i < width; i++)
705			fprintf(fp, ".");
706	}
707
708	fprintf(fp, "\n");
709	if (max_rows && ++nr_rows >= max_rows)
710		goto out;
711
712	fprintf(fp, "#\n");
713	if (max_rows && ++nr_rows >= max_rows)
714		goto out;
715
716print_entries:
717	linesz = hists__sort_list_width(hists) + 3 + 1;
718	linesz += perf_hpp__color_overhead();
719	line = malloc(linesz);
720	if (line == NULL) {
721		ret = -1;
722		goto out;
723	}
724
725	indent = hists__overhead_width(hists) + 4;
726
727	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
728		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
729		float percent;
730
731		if (h->filtered)
732			continue;
733
734		percent = hist_entry__get_percent_limit(h);
735		if (percent < min_pcnt)
736			continue;
737
738		ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);
739
740		if (max_rows && ++nr_rows >= max_rows)
741			break;
742
743		/*
744		 * If all children are filtered out or percent-limited,
745		 * display "no entry >= x.xx%" message.
746		 */
747		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
748			int depth = hists->nr_hpp_node + h->depth + 1;
749
750			print_hierarchy_indent(sep, depth, spaces, fp);
751			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
752
753			if (max_rows && ++nr_rows >= max_rows)
754				break;
755		}
756
757		if (h->ms.map == NULL && verbose > 1) {
758			__map_groups__fprintf_maps(h->thread->mg,
759						   MAP__FUNCTION, fp);
760			fprintf(fp, "%.10s end\n", graph_dotted_line);
761		}
762	}
763
764	free(line);
765out:
766	zfree(&rem_sq_bracket);
767
768	return ret;
769}
770
771size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
772{
773	int i;
774	size_t ret = 0;
775
776	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
777		const char *name;
778
779		if (stats->nr_events[i] == 0)
780			continue;
781
782		name = perf_event__name(i);
783		if (!strcmp(name, "UNKNOWN"))
784			continue;
785
786		ret += fprintf(fp, "%16s events: %10d\n", name,
787			       stats->nr_events[i]);
788	}
789
790	return ret;
791}