tools/perf/ui/stdio/hist.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdio.h>
  3#include <stdlib.h>
  4#include <linux/string.h>
  5
  6#include "../../util/callchain.h"
  7#include "../../util/debug.h"
  8#include "../../util/event.h"
  9#include "../../util/hist.h"
 10#include "../../util/map.h"
 11#include "../../util/map_groups.h"
 12#include "../../util/symbol.h"
 13#include "../../util/sort.h"
 14#include "../../util/evsel.h"
 15#include "../../util/srcline.h"
 16#include "../../util/string2.h"
 17#include "../../util/thread.h"
 18#include <linux/ctype.h>
 19#include <linux/zalloc.h>
 20
 21static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
 22{
 23	int i;
 24	int ret = fprintf(fp, "            ");
 25
 26	for (i = 0; i < left_margin; i++)
 27		ret += fprintf(fp, " ");
 28
 29	return ret;
 30}
 31
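/*
 * Print one connector line of the call graph: bit i of @depth_mask tells
 * whether a '|' is drawn at depth i, keeping open branches visually linked
 * across rows.
 */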
 32static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
 33					  int left_margin)
 34{
 35	int i;
 36	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
 37
 38	for (i = 0; i < depth; i++)
 39		if (depth_mask & (1 << i))
 40			ret += fprintf(fp, "|          ");
 41		else
 42			ret += fprintf(fp, "           ");
 43
 44	ret += fprintf(fp, "\n");
 45
 46	return ret;
 47}
 48
 49static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
 50				     struct callchain_list *chain,
 51				     int depth, int depth_mask, int period,
 52				     u64 total_samples, int left_margin)
 53{
 54	int i;
 55	size_t ret = 0;
 56	char bf[1024], *alloc_str = NULL;
 57	char buf[64];
 58	const char *str;
 59
 60	ret += callchain__fprintf_left_margin(fp, left_margin);
 61	for (i = 0; i < depth; i++) {
 62		if (depth_mask & (1 << i))
 63			ret += fprintf(fp, "|");
 64		else
 65			ret += fprintf(fp, " ");
 66		if (!period && i == depth - 1) {
 67			ret += fprintf(fp, "--");
 68			ret += callchain_node__fprintf_value(node, fp, total_samples);
 69			ret += fprintf(fp, "--");
 70		} else
 71			ret += fprintf(fp, "%s", "          ");
 72	}
 73
 74	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
 75
 76	if (symbol_conf.show_branchflag_count) {
 77		callchain_list_counts__printf_value(chain, NULL,
 78						    buf, sizeof(buf));
 79
 80		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
 81			str = "Not enough memory!";
 82		else
 83			str = alloc_str;
 84	}
 85
 86	fputs(str, fp);
 87	fputc('\n', fp);
 88	free(alloc_str);
 89
 90	return ret;
 91}
 92
 93static struct symbol *rem_sq_bracket;
 94static struct callchain_list rem_hits;
 95
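/*
 * Set up the "[...]" placeholder used to account for hits that were
 * filtered out (e.g. by the percent limit).  The extra 6 bytes appended to
 * the allocation hold the "[...]" string, which lands in the name[]
 * flexible array at the end of struct symbol.
 */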
 96static void init_rem_hits(void)
 97{
 98	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
 99	if (!rem_sq_bracket) {
100		fprintf(stderr, "Not enough memory to display remaining hits\n");
101		return;
102	}
103
104	strcpy(rem_sq_bracket->name, "[...]");
105	rem_hits.ms.sym = rem_sq_bracket;
106}
107
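/*
 * Recursively print one level of the call graph rooted at @root.  Each
 * child prints its own chain entries and then recurses one level deeper;
 * in relative mode, hits left over after the printed children (those
 * trimmed by filtering or the percent limit) are collapsed into a
 * synthetic "[...]" entry at the end of the level.
 */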
108static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
109					 u64 total_samples, int depth,
110					 int depth_mask, int left_margin)
111{
112	struct rb_node *node, *next;
113	struct callchain_node *child = NULL;
114	struct callchain_list *chain;
115	int new_depth_mask = depth_mask;
116	u64 remaining;
117	size_t ret = 0;
118	int i;
119	uint entries_printed = 0;
120	int cumul_count = 0;
121
122	remaining = total_samples;
123
124	node = rb_first(root);
125	while (node) {
126		u64 new_total;
127		u64 cumul;
128
129		child = rb_entry(node, struct callchain_node, rb_node);
130		cumul = callchain_cumul_hits(child);
131		remaining -= cumul;
132		cumul_count += callchain_cumul_counts(child);
133
134		/*
135		 * The depth mask manages the output of pipes that show
136		 * the depth. We don't want to keep the pipes of the current
 137		 * level for the last child of this depth,
 138		 * except if we have remaining filtered hits: they will
 139		 * supersede the last child.
140		 */
141		next = rb_next(node);
142		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
143			new_depth_mask &= ~(1 << (depth - 1));
144
145		/*
146		 * But we keep the older depth mask for the line separator
147		 * to keep the level link until we reach the last child
148		 */
149		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
150						   left_margin);
151		i = 0;
152		list_for_each_entry(chain, &child->val, list) {
153			ret += ipchain__fprintf_graph(fp, child, chain, depth,
154						      new_depth_mask, i++,
155						      total_samples,
156						      left_margin);
157		}
158
159		if (callchain_param.mode == CHAIN_GRAPH_REL)
160			new_total = child->children_hit;
161		else
162			new_total = total_samples;
163
164		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
165						  depth + 1,
166						  new_depth_mask | (1 << depth),
167						  left_margin);
168		node = next;
169		if (++entries_printed == callchain_param.print_limit)
170			break;
171	}
172
173	if (callchain_param.mode == CHAIN_GRAPH_REL &&
174		remaining && remaining != total_samples) {
175		struct callchain_node rem_node = {
176			.hit = remaining,
177		};
178
179		if (!rem_sq_bracket)
180			return ret;
181
182		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
183			rem_node.count = child->parent->children_count - cumul_count;
184			if (rem_node.count <= 0)
185				return ret;
186		}
187
188		new_depth_mask &= ~(1 << (depth - 1));
189		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
190					      new_depth_mask, 0, total_samples,
191					      left_margin);
192	}
193
194	return ret;
195}
196
 197/*
 198 * If there is a single callchain root, don't bother printing
 199 * its percentage (100% in fractal mode, and the same percentage
 200 * as the hist entry in graph mode). This also avoids one level of columns.
 201 *
 202 * However, when a percent limit is applied, a single callchain
 203 * node may still have a different (non-100% in fractal mode) percentage.
 204 */
205static bool need_percent_display(struct rb_node *node, u64 parent_samples)
206{
207	struct callchain_node *cnode;
208
209	if (rb_next(node))
210		return true;
211
212	cnode = rb_entry(node, struct callchain_node, rb_node);
213	return callchain_cumul_hits(cnode) != parent_samples;
214}
215
216static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
217				       u64 total_samples, u64 parent_samples,
218				       int left_margin)
219{
220	struct callchain_node *cnode;
221	struct callchain_list *chain;
222	u32 entries_printed = 0;
223	bool printed = false;
224	struct rb_node *node;
225	int i = 0;
226	int ret = 0;
227	char bf[1024];
228
229	node = rb_first(root);
230	if (node && !need_percent_display(node, parent_samples)) {
231		cnode = rb_entry(node, struct callchain_node, rb_node);
232		list_for_each_entry(chain, &cnode->val, list) {
233			/*
 234			 * If we sort by symbol, the first entry is the same as
 235			 * the symbol. No need to print it, otherwise it would
 236			 * appear to be displayed twice.
237			 */
238			if (!i++ && field_order == NULL &&
239			    sort_order && strstarts(sort_order, "sym"))
240				continue;
241
242			if (!printed) {
243				ret += callchain__fprintf_left_margin(fp, left_margin);
244				ret += fprintf(fp, "|\n");
245				ret += callchain__fprintf_left_margin(fp, left_margin);
246				ret += fprintf(fp, "---");
247				left_margin += 3;
248				printed = true;
249			} else
250				ret += callchain__fprintf_left_margin(fp, left_margin);
251
252			ret += fprintf(fp, "%s",
253				       callchain_list__sym_name(chain, bf,
254								sizeof(bf),
255								false));
256
257			if (symbol_conf.show_branchflag_count)
258				ret += callchain_list_counts__printf_value(
259						chain, fp, NULL, 0);
260			ret += fprintf(fp, "\n");
261
262			if (++entries_printed == callchain_param.print_limit)
263				break;
264		}
265		root = &cnode->rb_root;
266	}
267
268	if (callchain_param.mode == CHAIN_GRAPH_REL)
269		total_samples = parent_samples;
270
271	ret += __callchain__fprintf_graph(fp, root, total_samples,
272					  1, 1, left_margin);
273	if (ret) {
274		/* do not add a blank line if it printed nothing */
275		ret += fprintf(fp, "\n");
276	}
277
278	return ret;
279}
280
281static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
282					u64 total_samples)
283{
284	struct callchain_list *chain;
285	size_t ret = 0;
286	char bf[1024];
287
288	if (!node)
289		return 0;
290
291	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
292
293
294	list_for_each_entry(chain, &node->val, list) {
295		if (chain->ip >= PERF_CONTEXT_MAX)
296			continue;
297		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
298					bf, sizeof(bf), false));
299	}
300
301	return ret;
302}
303
304static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
305				      u64 total_samples)
306{
307	size_t ret = 0;
308	u32 entries_printed = 0;
309	struct callchain_node *chain;
310	struct rb_node *rb_node = rb_first(tree);
311
312	while (rb_node) {
313		chain = rb_entry(rb_node, struct callchain_node, rb_node);
314
315		ret += fprintf(fp, "           ");
316		ret += callchain_node__fprintf_value(chain, fp, total_samples);
317		ret += fprintf(fp, "\n");
318		ret += __callchain__fprintf_flat(fp, chain, total_samples);
319		ret += fprintf(fp, "\n");
320		if (++entries_printed == callchain_param.print_limit)
321			break;
322
323		rb_node = rb_next(rb_node);
324	}
325
326	return ret;
327}
328
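/*
 * Folded output: walk the parent chain first so ancestors are printed
 * before @node's own entries, joined on a single line by field_sep
 * (';' by default); PERF_CONTEXT_* marker entries are skipped.
 */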
329static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
330{
331	const char *sep = symbol_conf.field_sep ?: ";";
332	struct callchain_list *chain;
333	size_t ret = 0;
334	char bf[1024];
335	bool first;
336
337	if (!node)
338		return 0;
339
340	ret += __callchain__fprintf_folded(fp, node->parent);
341
342	first = (ret == 0);
343	list_for_each_entry(chain, &node->val, list) {
344		if (chain->ip >= PERF_CONTEXT_MAX)
345			continue;
346		ret += fprintf(fp, "%s%s", first ? "" : sep,
347			       callchain_list__sym_name(chain,
348						bf, sizeof(bf), false));
349		first = false;
350	}
351
352	return ret;
353}
354
355static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
356					u64 total_samples)
357{
358	size_t ret = 0;
359	u32 entries_printed = 0;
360	struct callchain_node *chain;
361	struct rb_node *rb_node = rb_first(tree);
362
363	while (rb_node) {
364
365		chain = rb_entry(rb_node, struct callchain_node, rb_node);
366
367		ret += callchain_node__fprintf_value(chain, fp, total_samples);
368		ret += fprintf(fp, " ");
369		ret += __callchain__fprintf_folded(fp, chain);
370		ret += fprintf(fp, "\n");
371		if (++entries_printed == callchain_param.print_limit)
372			break;
373
374		rb_node = rb_next(rb_node);
375	}
376
377	return ret;
378}
379
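/*
 * Dispatch to the graph, flat or folded callchain printer according to
 * callchain_param.mode.  In relative graph mode the percentages are based
 * on the entry's own period (or on the accumulated period when callchains
 * are cumulated), otherwise on total_samples.
 */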
380static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
381					    u64 total_samples, int left_margin,
382					    FILE *fp)
383{
384	u64 parent_samples = he->stat.period;
385
386	if (symbol_conf.cumulate_callchain)
387		parent_samples = he->stat_acc->period;
388
389	switch (callchain_param.mode) {
390	case CHAIN_GRAPH_REL:
391		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
392						parent_samples, left_margin);
393		break;
394	case CHAIN_GRAPH_ABS:
395		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
396						parent_samples, left_margin);
397		break;
398	case CHAIN_FLAT:
399		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
400		break;
401	case CHAIN_FOLDED:
402		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
403		break;
404	case CHAIN_NONE:
405		break;
406	default:
407		pr_err("Bad callchain mode\n");
408	}
409
410	return 0;
411}
412
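/*
 * Format one hist entry into hpp->buf, running every column format in
 * @hpp_list and separating the columns with field_sep (two spaces when no
 * separator is configured).  Returns the number of bytes written.
 */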
413int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
414			   struct perf_hpp_list *hpp_list)
415{
416	const char *sep = symbol_conf.field_sep;
417	struct perf_hpp_fmt *fmt;
418	char *start = hpp->buf;
419	int ret;
420	bool first = true;
421
422	if (symbol_conf.exclude_other && !he->parent)
423		return 0;
424
425	perf_hpp_list__for_each_format(hpp_list, fmt) {
426		if (perf_hpp__should_skip(fmt, he->hists))
427			continue;
428
429		/*
430		 * If there's no field_sep, we still need
431		 * to display initial '  '.
432		 */
433		if (!sep || !first) {
434			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
435			advance_hpp(hpp, ret);
436		} else
437			first = false;
438
439		if (perf_hpp__use_color() && fmt->color)
440			ret = fmt->color(fmt, hpp, he);
441		else
442			ret = fmt->entry(fmt, hpp, he);
443
444		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
445		advance_hpp(hpp, ret);
446	}
447
448	return hpp->buf - start;
449}
450
451static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
452{
453	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
454}
455
456static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
457					 struct perf_hpp *hpp,
458					 struct hists *hists,
459					 FILE *fp)
460{
461	const char *sep = symbol_conf.field_sep;
462	struct perf_hpp_fmt *fmt;
463	struct perf_hpp_list_node *fmt_node;
464	char *buf = hpp->buf;
465	size_t size = hpp->size;
466	int ret, printed = 0;
467	bool first = true;
468
469	if (symbol_conf.exclude_other && !he->parent)
470		return 0;
471
472	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
473	advance_hpp(hpp, ret);
474
475	/* the first hpp_list_node is for overhead columns */
476	fmt_node = list_first_entry(&hists->hpp_formats,
477				    struct perf_hpp_list_node, list);
478	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
479		/*
480		 * If there's no field_sep, we still need
481		 * to display initial '  '.
482		 */
483		if (!sep || !first) {
484			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
485			advance_hpp(hpp, ret);
486		} else
487			first = false;
488
489		if (perf_hpp__use_color() && fmt->color)
490			ret = fmt->color(fmt, hpp, he);
491		else
492			ret = fmt->entry(fmt, hpp, he);
493
494		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
495		advance_hpp(hpp, ret);
496	}
497
498	if (!sep)
499		ret = scnprintf(hpp->buf, hpp->size, "%*s",
500				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
501	advance_hpp(hpp, ret);
502
503	printed += fprintf(fp, "%s", buf);
504
505	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
506		hpp->buf  = buf;
507		hpp->size = size;
508
509		/*
510		 * No need to call hist_entry__snprintf_alignment() since this
511		 * fmt is always the last column in the hierarchy mode.
512		 */
513		if (perf_hpp__use_color() && fmt->color)
514			fmt->color(fmt, hpp, he);
515		else
516			fmt->entry(fmt, hpp, he);
517
518		/*
519		 * dynamic entries are right-aligned but we want left-aligned
520		 * in the hierarchy mode
521		 */
522		printed += fprintf(fp, "%s%s", sep ?: "  ", skip_spaces(buf));
523	}
524	printed += putc('\n', fp);
525
526	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
527		u64 total = hists__total_period(hists);
528
529		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
530		goto out;
531	}
532
533out:
534	return printed;
535}
536
537static int hist_entry__block_fprintf(struct hist_entry *he,
538				     char *bf, size_t size,
539				     FILE *fp)
540{
541	struct block_hist *bh = container_of(he, struct block_hist, he);
542	int ret = 0;
543
544	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
545		struct perf_hpp hpp = {
546			.buf		= bf,
547			.size		= size,
548			.skip		= false,
549		};
550
551		bh->block_idx = i;
552		hist_entry__snprintf(he, &hpp);
553
554		if (!hpp.skip)
555			ret += fprintf(fp, "%s\n", bf);
556	}
557
558	return ret;
559}
560
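/*
 * Print a single hist entry: hierarchy and block report modes use their
 * own formatters, otherwise the formatted line is followed by the entry's
 * callchain when one was recorded and not explicitly ignored.
 */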
561static int hist_entry__fprintf(struct hist_entry *he, size_t size,
562			       char *bf, size_t bfsz, FILE *fp,
563			       bool ignore_callchains)
564{
565	int ret;
566	int callchain_ret = 0;
567	struct perf_hpp hpp = {
568		.buf		= bf,
569		.size		= size,
570	};
571	struct hists *hists = he->hists;
572	u64 total_period = hists->stats.total_period;
573
574	if (size == 0 || size > bfsz)
575		size = hpp.size = bfsz;
576
577	if (symbol_conf.report_hierarchy)
578		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
579
580	if (symbol_conf.report_block)
581		return hist_entry__block_fprintf(he, bf, size, fp);
582
583	hist_entry__snprintf(he, &hpp);
584
585	ret = fprintf(fp, "%s\n", bf);
586
587	if (hist_entry__has_callchains(he) && !ignore_callchains)
588		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
589							      0, fp);
590
591	ret += callchain_ret;
592
593	return ret;
594}
595
596static int print_hierarchy_indent(const char *sep, int indent,
597				  const char *line, FILE *fp)
598{
599	int width;
600
601	if (sep != NULL || indent < 2)
602		return 0;
603
604	width = (indent - 2) * HIERARCHY_INDENT;
605
606	return fprintf(fp, "%-*.*s", width, width, line);
607}
608
609static int hists__fprintf_hierarchy_headers(struct hists *hists,
610					    struct perf_hpp *hpp, FILE *fp)
611{
612	bool first_node, first_col;
613	int indent;
614	int depth;
615	unsigned width = 0;
616	unsigned header_width = 0;
617	struct perf_hpp_fmt *fmt;
618	struct perf_hpp_list_node *fmt_node;
619	const char *sep = symbol_conf.field_sep;
620
621	indent = hists->nr_hpp_node;
622
623	/* preserve max indent depth for column headers */
624	print_hierarchy_indent(sep, indent, " ", fp);
625
626	/* the first hpp_list_node is for overhead columns */
627	fmt_node = list_first_entry(&hists->hpp_formats,
628				    struct perf_hpp_list_node, list);
629
630	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
631		fmt->header(fmt, hpp, hists, 0, NULL);
632		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
633	}
634
635	/* combine sort headers with ' / ' */
636	first_node = true;
637	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
638		if (!first_node)
639			header_width += fprintf(fp, " / ");
640		first_node = false;
641
642		first_col = true;
643		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
644			if (perf_hpp__should_skip(fmt, hists))
645				continue;
646
647			if (!first_col)
648				header_width += fprintf(fp, "+");
649			first_col = false;
650
651			fmt->header(fmt, hpp, hists, 0, NULL);
652
653			header_width += fprintf(fp, "%s", strim(hpp->buf));
654		}
655	}
656
657	fprintf(fp, "\n# ");
658
659	/* preserve max indent depth for initial dots */
660	print_hierarchy_indent(sep, indent, dots, fp);
661
662	/* the first hpp_list_node is for overhead columns */
663	fmt_node = list_first_entry(&hists->hpp_formats,
664				    struct perf_hpp_list_node, list);
665
666	first_col = true;
667	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
668		if (!first_col)
669			fprintf(fp, "%s", sep ?: "..");
670		first_col = false;
671
672		width = fmt->width(fmt, hpp, hists);
673		fprintf(fp, "%.*s", width, dots);
674	}
675
676	depth = 0;
677	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
678		first_col = true;
679		width = depth * HIERARCHY_INDENT;
680
681		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
682			if (perf_hpp__should_skip(fmt, hists))
683				continue;
684
685			if (!first_col)
686				width++;  /* for '+' sign between column header */
687			first_col = false;
688
689			width += fmt->width(fmt, hpp, hists);
690		}
691
692		if (width > header_width)
693			header_width = width;
694
695		depth++;
696	}
697
698	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
699
700	fprintf(fp, "\n#\n");
701
702	return 2;
703}
704
705static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
706			 int line, FILE *fp)
707{
708	struct perf_hpp_fmt *fmt;
709	const char *sep = symbol_conf.field_sep;
710	bool first = true;
711	int span = 0;
712
713	hists__for_each_format(hists, fmt) {
714		if (perf_hpp__should_skip(fmt, hists))
715			continue;
716
717		if (!first && !span)
718			fprintf(fp, "%s", sep ?: "  ");
719		else
720			first = false;
721
722		fmt->header(fmt, hpp, hists, line, &span);
723
724		if (!span)
725			fprintf(fp, "%s", hpp->buf);
726	}
727}
728
729static int
730hists__fprintf_standard_headers(struct hists *hists,
731				struct perf_hpp *hpp,
732				FILE *fp)
733{
734	struct perf_hpp_list *hpp_list = hists->hpp_list;
735	struct perf_hpp_fmt *fmt;
736	unsigned int width;
737	const char *sep = symbol_conf.field_sep;
738	bool first = true;
739	int line;
740
741	for (line = 0; line < hpp_list->nr_header_lines; line++) {
742		/* first # is displayed one level up */
743		if (line)
744			fprintf(fp, "# ");
745		fprintf_line(hists, hpp, line, fp);
746		fprintf(fp, "\n");
747	}
748
749	if (sep)
750		return hpp_list->nr_header_lines;
751
752	first = true;
753
754	fprintf(fp, "# ");
755
756	hists__for_each_format(hists, fmt) {
757		unsigned int i;
758
759		if (perf_hpp__should_skip(fmt, hists))
760			continue;
761
762		if (!first)
763			fprintf(fp, "%s", sep ?: "  ");
764		else
765			first = false;
766
767		width = fmt->width(fmt, hpp, hists);
768		for (i = 0; i < width; i++)
769			fprintf(fp, ".");
770	}
771
772	fprintf(fp, "\n");
773	fprintf(fp, "#\n");
774	return hpp_list->nr_header_lines + 2;
775}
776
777int hists__fprintf_headers(struct hists *hists, FILE *fp)
778{
779	char bf[1024];
780	struct perf_hpp dummy_hpp = {
781		.buf	= bf,
782		.size	= sizeof(bf),
783	};
784
785	fprintf(fp, "# ");
786
787	if (symbol_conf.report_hierarchy)
788		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
789	else
790		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
791
792}
793
794size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
795		      int max_cols, float min_pcnt, FILE *fp,
796		      bool ignore_callchains)
797{
798	struct rb_node *nd;
799	size_t ret = 0;
800	const char *sep = symbol_conf.field_sep;
801	int nr_rows = 0;
802	size_t linesz;
803	char *line = NULL;
804	unsigned indent;
805
806	init_rem_hits();
807
808	hists__reset_column_width(hists);
809
810	if (symbol_conf.col_width_list_str)
811		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
812
813	if (show_header)
814		nr_rows += hists__fprintf_headers(hists, fp);
815
816	if (max_rows && nr_rows >= max_rows)
817		goto out;
818
819	linesz = hists__sort_list_width(hists) + 3 + 1;
820	linesz += perf_hpp__color_overhead();
821	line = malloc(linesz);
822	if (line == NULL) {
823		ret = -1;
824		goto out;
825	}
826
827	indent = hists__overhead_width(hists) + 4;
828
829	for (nd = rb_first_cached(&hists->entries); nd;
830	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
831		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
832		float percent;
833
834		if (h->filtered)
835			continue;
836
837		percent = hist_entry__get_percent_limit(h);
838		if (percent < min_pcnt)
839			continue;
840
841		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
842
843		if (max_rows && ++nr_rows >= max_rows)
844			break;
845
846		/*
847		 * If all children are filtered out or percent-limited,
848		 * display "no entry >= x.xx%" message.
849		 */
850		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
851			int depth = hists->nr_hpp_node + h->depth + 1;
852
853			print_hierarchy_indent(sep, depth, " ", fp);
854			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
855
856			if (max_rows && ++nr_rows >= max_rows)
857				break;
858		}
859
860		if (h->ms.map == NULL && verbose > 1) {
861			map_groups__fprintf(h->thread->mg, fp);
862			fprintf(fp, "%.10s end\n", graph_dotted_line);
863		}
864	}
865
866	free(line);
867out:
868	zfree(&rem_sq_bracket);
869
870	return ret;
871}
872
873size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
874{
875	int i;
876	size_t ret = 0;
877
878	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
879		const char *name;
880
881		name = perf_event__name(i);
882		if (!strcmp(name, "UNKNOWN"))
883			continue;
884
885		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
886	}
887
888	return ret;
889}
tools/perf/ui/stdio/hist.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdio.h>
  3#include <stdlib.h>
  4#include <linux/string.h>
  5
  6#include "../../util/callchain.h"
  7#include "../../util/debug.h"
  8#include "../../util/event.h"
  9#include "../../util/hist.h"
 10#include "../../util/map.h"
 11#include "../../util/maps.h"
 12#include "../../util/symbol.h"
 13#include "../../util/sort.h"
 14#include "../../util/evsel.h"
 15#include "../../util/srcline.h"
 16#include "../../util/string2.h"
 17#include "../../util/thread.h"
 18#include "../../util/block-info.h"
 19#include <linux/ctype.h>
 20#include <linux/zalloc.h>
 21
 22static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
 23{
 24	int i;
 25	int ret = fprintf(fp, "            ");
 26
 27	for (i = 0; i < left_margin; i++)
 28		ret += fprintf(fp, " ");
 29
 30	return ret;
 31}
 32
 33static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
 34					  int left_margin)
 35{
 36	int i;
 37	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
 38
 39	for (i = 0; i < depth; i++)
 40		if (depth_mask & (1 << i))
 41			ret += fprintf(fp, "|          ");
 42		else
 43			ret += fprintf(fp, "           ");
 44
 45	ret += fprintf(fp, "\n");
 46
 47	return ret;
 48}
 49
 50static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
 51				     struct callchain_list *chain,
 52				     int depth, int depth_mask, int period,
 53				     u64 total_samples, int left_margin)
 54{
 55	int i;
 56	size_t ret = 0;
 57	char bf[1024], *alloc_str = NULL;
 58	char buf[64];
 59	const char *str;
 60
 61	ret += callchain__fprintf_left_margin(fp, left_margin);
 62	for (i = 0; i < depth; i++) {
 63		if (depth_mask & (1 << i))
 64			ret += fprintf(fp, "|");
 65		else
 66			ret += fprintf(fp, " ");
 67		if (!period && i == depth - 1) {
 68			ret += fprintf(fp, "--");
 69			ret += callchain_node__fprintf_value(node, fp, total_samples);
 70			ret += fprintf(fp, "--");
 71		} else
 72			ret += fprintf(fp, "%s", "          ");
 73	}
 74
 75	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
 76
 77	if (symbol_conf.show_branchflag_count) {
 78		callchain_list_counts__printf_value(chain, NULL,
 79						    buf, sizeof(buf));
 80
 81		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
 82			str = "Not enough memory!";
 83		else
 84			str = alloc_str;
 85	}
 86
 87	fputs(str, fp);
 88	fputc('\n', fp);
 89	free(alloc_str);
 90
 91	return ret;
 92}
 93
 94static struct symbol *rem_sq_bracket;
 95static struct callchain_list rem_hits;
 96
 97static void init_rem_hits(void)
 98{
 99	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
100	if (!rem_sq_bracket) {
101		fprintf(stderr, "Not enough memory to display remaining hits\n");
102		return;
103	}
104
105	strcpy(rem_sq_bracket->name, "[...]");
106	rem_hits.ms.sym = rem_sq_bracket;
107}
108
109static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
110					 u64 total_samples, int depth,
111					 int depth_mask, int left_margin)
112{
113	struct rb_node *node, *next;
114	struct callchain_node *child = NULL;
115	struct callchain_list *chain;
116	int new_depth_mask = depth_mask;
117	u64 remaining;
118	size_t ret = 0;
119	int i;
120	uint entries_printed = 0;
121	int cumul_count = 0;
122
123	remaining = total_samples;
124
125	node = rb_first(root);
126	while (node) {
127		u64 new_total;
128		u64 cumul;
129
130		child = rb_entry(node, struct callchain_node, rb_node);
131		cumul = callchain_cumul_hits(child);
132		remaining -= cumul;
133		cumul_count += callchain_cumul_counts(child);
134
135		/*
136		 * The depth mask manages the output of pipes that show
137		 * the depth. We don't want to keep the pipes of the current
 138		 * level for the last child of this depth,
 139		 * except if we have remaining filtered hits: they will
 140		 * supersede the last child.
141		 */
142		next = rb_next(node);
143		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
144			new_depth_mask &= ~(1 << (depth - 1));
145
146		/*
147		 * But we keep the older depth mask for the line separator
148		 * to keep the level link until we reach the last child
149		 */
150		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
151						   left_margin);
152		i = 0;
153		list_for_each_entry(chain, &child->val, list) {
154			ret += ipchain__fprintf_graph(fp, child, chain, depth,
155						      new_depth_mask, i++,
156						      total_samples,
157						      left_margin);
158		}
159
160		if (callchain_param.mode == CHAIN_GRAPH_REL)
161			new_total = child->children_hit;
162		else
163			new_total = total_samples;
164
165		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
166						  depth + 1,
167						  new_depth_mask | (1 << depth),
168						  left_margin);
169		node = next;
170		if (++entries_printed == callchain_param.print_limit)
171			break;
172	}
173
174	if (callchain_param.mode == CHAIN_GRAPH_REL &&
175		remaining && remaining != total_samples) {
176		struct callchain_node rem_node = {
177			.hit = remaining,
178		};
179
180		if (!rem_sq_bracket)
181			return ret;
182
183		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
184			rem_node.count = child->parent->children_count - cumul_count;
185			if (rem_node.count <= 0)
186				return ret;
187		}
188
189		new_depth_mask &= ~(1 << (depth - 1));
190		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
191					      new_depth_mask, 0, total_samples,
192					      left_margin);
193	}
194
195	return ret;
196}
197
 198/*
 199 * If there is a single callchain root, don't bother printing
 200 * its percentage (100% in fractal mode, and the same percentage
 201 * as the hist entry in graph mode). This also avoids one level of columns.
 202 *
 203 * However, when a percent limit is applied, a single callchain
 204 * node may still have a different (non-100% in fractal mode) percentage.
 205 */
206static bool need_percent_display(struct rb_node *node, u64 parent_samples)
207{
208	struct callchain_node *cnode;
209
210	if (rb_next(node))
211		return true;
212
213	cnode = rb_entry(node, struct callchain_node, rb_node);
214	return callchain_cumul_hits(cnode) != parent_samples;
215}
216
217static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
218				       u64 total_samples, u64 parent_samples,
219				       int left_margin)
220{
221	struct callchain_node *cnode;
222	struct callchain_list *chain;
223	u32 entries_printed = 0;
224	bool printed = false;
225	struct rb_node *node;
226	int i = 0;
227	int ret = 0;
228	char bf[1024];
229
230	node = rb_first(root);
231	if (node && !need_percent_display(node, parent_samples)) {
232		cnode = rb_entry(node, struct callchain_node, rb_node);
233		list_for_each_entry(chain, &cnode->val, list) {
234			/*
 235			 * If we sort by symbol, the first entry is the same as
 236			 * the symbol. No need to print it, otherwise it would
 237			 * appear to be displayed twice.
238			 */
239			if (!i++ && field_order == NULL &&
240			    sort_order && strstarts(sort_order, "sym"))
241				continue;
242
243			if (!printed) {
244				ret += callchain__fprintf_left_margin(fp, left_margin);
245				ret += fprintf(fp, "|\n");
246				ret += callchain__fprintf_left_margin(fp, left_margin);
247				ret += fprintf(fp, "---");
248				left_margin += 3;
249				printed = true;
250			} else
251				ret += callchain__fprintf_left_margin(fp, left_margin);
252
253			ret += fprintf(fp, "%s",
254				       callchain_list__sym_name(chain, bf,
255								sizeof(bf),
256								false));
257
258			if (symbol_conf.show_branchflag_count)
259				ret += callchain_list_counts__printf_value(
260						chain, fp, NULL, 0);
261			ret += fprintf(fp, "\n");
262
263			if (++entries_printed == callchain_param.print_limit)
264				break;
265		}
266		root = &cnode->rb_root;
267	}
268
269	if (callchain_param.mode == CHAIN_GRAPH_REL)
270		total_samples = parent_samples;
271
272	ret += __callchain__fprintf_graph(fp, root, total_samples,
273					  1, 1, left_margin);
274	if (ret) {
275		/* do not add a blank line if it printed nothing */
276		ret += fprintf(fp, "\n");
277	}
278
279	return ret;
280}
281
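/*
 * Flat output: print the parent chain first, then @node's own entries,
 * one indented symbol per line, skipping PERF_CONTEXT_* marker entries.
 */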
282static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
283					u64 total_samples)
284{
285	struct callchain_list *chain;
286	size_t ret = 0;
287	char bf[1024];
288
289	if (!node)
290		return 0;
291
292	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
293
294
295	list_for_each_entry(chain, &node->val, list) {
296		if (chain->ip >= PERF_CONTEXT_MAX)
297			continue;
298		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
299					bf, sizeof(bf), false));
300	}
301
302	return ret;
303}
304
305static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
306				      u64 total_samples)
307{
308	size_t ret = 0;
309	u32 entries_printed = 0;
310	struct callchain_node *chain;
311	struct rb_node *rb_node = rb_first(tree);
312
313	while (rb_node) {
314		chain = rb_entry(rb_node, struct callchain_node, rb_node);
315
316		ret += fprintf(fp, "           ");
317		ret += callchain_node__fprintf_value(chain, fp, total_samples);
318		ret += fprintf(fp, "\n");
319		ret += __callchain__fprintf_flat(fp, chain, total_samples);
320		ret += fprintf(fp, "\n");
321		if (++entries_printed == callchain_param.print_limit)
322			break;
323
324		rb_node = rb_next(rb_node);
325	}
326
327	return ret;
328}
329
330static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
331{
332	const char *sep = symbol_conf.field_sep ?: ";";
333	struct callchain_list *chain;
334	size_t ret = 0;
335	char bf[1024];
336	bool first;
337
338	if (!node)
339		return 0;
340
341	ret += __callchain__fprintf_folded(fp, node->parent);
342
343	first = (ret == 0);
344	list_for_each_entry(chain, &node->val, list) {
345		if (chain->ip >= PERF_CONTEXT_MAX)
346			continue;
347		ret += fprintf(fp, "%s%s", first ? "" : sep,
348			       callchain_list__sym_name(chain,
349						bf, sizeof(bf), false));
350		first = false;
351	}
352
353	return ret;
354}
355
356static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
357					u64 total_samples)
358{
359	size_t ret = 0;
360	u32 entries_printed = 0;
361	struct callchain_node *chain;
362	struct rb_node *rb_node = rb_first(tree);
363
364	while (rb_node) {
365
366		chain = rb_entry(rb_node, struct callchain_node, rb_node);
367
368		ret += callchain_node__fprintf_value(chain, fp, total_samples);
369		ret += fprintf(fp, " ");
370		ret += __callchain__fprintf_folded(fp, chain);
371		ret += fprintf(fp, "\n");
372		if (++entries_printed == callchain_param.print_limit)
373			break;
374
375		rb_node = rb_next(rb_node);
376	}
377
378	return ret;
379}
380
381static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
382					    u64 total_samples, int left_margin,
383					    FILE *fp)
384{
385	u64 parent_samples = he->stat.period;
386
387	if (symbol_conf.cumulate_callchain)
388		parent_samples = he->stat_acc->period;
389
390	switch (callchain_param.mode) {
391	case CHAIN_GRAPH_REL:
392		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
393						parent_samples, left_margin);
394		break;
395	case CHAIN_GRAPH_ABS:
396		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
397						parent_samples, left_margin);
398		break;
399	case CHAIN_FLAT:
400		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
401		break;
402	case CHAIN_FOLDED:
403		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
404		break;
405	case CHAIN_NONE:
406		break;
407	default:
408		pr_err("Bad callchain mode\n");
409	}
410
411	return 0;
412}
413
414int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
415			   struct perf_hpp_list *hpp_list)
416{
417	const char *sep = symbol_conf.field_sep;
418	struct perf_hpp_fmt *fmt;
419	char *start = hpp->buf;
420	int ret;
421	bool first = true;
422
423	if (symbol_conf.exclude_other && !he->parent)
424		return 0;
425
426	perf_hpp_list__for_each_format(hpp_list, fmt) {
427		if (perf_hpp__should_skip(fmt, he->hists))
428			continue;
429
430		/*
431		 * If there's no field_sep, we still need
432		 * to display initial '  '.
433		 */
434		if (!sep || !first) {
435			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
436			advance_hpp(hpp, ret);
437		} else
438			first = false;
439
440		if (perf_hpp__use_color() && fmt->color)
441			ret = fmt->color(fmt, hpp, he);
442		else
443			ret = fmt->entry(fmt, hpp, he);
444
445		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
446		advance_hpp(hpp, ret);
447	}
448
449	return hpp->buf - start;
450}
451
452static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
453{
454	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
455}
456
457static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
458					 struct perf_hpp *hpp,
459					 struct hists *hists,
460					 FILE *fp)
461{
462	const char *sep = symbol_conf.field_sep;
463	struct perf_hpp_fmt *fmt;
464	struct perf_hpp_list_node *fmt_node;
465	char *buf = hpp->buf;
466	size_t size = hpp->size;
467	int ret, printed = 0;
468	bool first = true;
469
470	if (symbol_conf.exclude_other && !he->parent)
471		return 0;
472
473	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
474	advance_hpp(hpp, ret);
475
476	/* the first hpp_list_node is for overhead columns */
477	fmt_node = list_first_entry(&hists->hpp_formats,
478				    struct perf_hpp_list_node, list);
479	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
480		/*
481		 * If there's no field_sep, we still need
482		 * to display initial '  '.
483		 */
484		if (!sep || !first) {
485			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
486			advance_hpp(hpp, ret);
487		} else
488			first = false;
489
490		if (perf_hpp__use_color() && fmt->color)
491			ret = fmt->color(fmt, hpp, he);
492		else
493			ret = fmt->entry(fmt, hpp, he);
494
495		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
496		advance_hpp(hpp, ret);
497	}
498
499	if (!sep)
500		ret = scnprintf(hpp->buf, hpp->size, "%*s",
501				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
502	advance_hpp(hpp, ret);
503
504	printed += fprintf(fp, "%s", buf);
505
506	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
507		hpp->buf  = buf;
508		hpp->size = size;
509
510		/*
511		 * No need to call hist_entry__snprintf_alignment() since this
512		 * fmt is always the last column in the hierarchy mode.
513		 */
514		if (perf_hpp__use_color() && fmt->color)
515			fmt->color(fmt, hpp, he);
516		else
517			fmt->entry(fmt, hpp, he);
518
519		/*
520		 * dynamic entries are right-aligned but we want left-aligned
521		 * in the hierarchy mode
522		 */
523		printed += fprintf(fp, "%s%s", sep ?: "  ", skip_spaces(buf));
524	}
525	printed += putc('\n', fp);
526
527	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
528		u64 total = hists__total_period(hists);
529
530		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
531		goto out;
532	}
533
534out:
535	return printed;
536}
537
538static int hist_entry__block_fprintf(struct hist_entry *he,
539				     char *bf, size_t size,
540				     FILE *fp)
541{
542	struct block_hist *bh = container_of(he, struct block_hist, he);
543	int ret = 0;
544
545	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
546		struct perf_hpp hpp = {
547			.buf		= bf,
548			.size		= size,
549			.skip		= false,
550		};
551
552		bh->block_idx = i;
553		hist_entry__snprintf(he, &hpp);
554
555		if (!hpp.skip)
556			ret += fprintf(fp, "%s\n", bf);
557	}
558
559	return ret;
560}
561
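/*
 * Print a single basic block entry on its own line.  Unlike
 * hist_entry__block_fprintf() above there is no per-block loop, since each
 * hist entry already represents exactly one block here.
 */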
562static int hist_entry__individual_block_fprintf(struct hist_entry *he,
563						char *bf, size_t size,
564						FILE *fp)
565{
566	int ret = 0;
567
568	struct perf_hpp hpp = {
569		.buf		= bf,
570		.size		= size,
571		.skip		= false,
572	};
573
574	hist_entry__snprintf(he, &hpp);
575	if (!hpp.skip)
576		ret += fprintf(fp, "%s\n", bf);
577
578	return ret;
579}
580
581static int hist_entry__fprintf(struct hist_entry *he, size_t size,
582			       char *bf, size_t bfsz, FILE *fp,
583			       bool ignore_callchains)
584{
585	int ret;
586	int callchain_ret = 0;
587	struct perf_hpp hpp = {
588		.buf		= bf,
589		.size		= size,
590	};
591	struct hists *hists = he->hists;
592	u64 total_period = hists->stats.total_period;
593
594	if (size == 0 || size > bfsz)
595		size = hpp.size = bfsz;
596
597	if (symbol_conf.report_hierarchy)
598		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
599
600	if (symbol_conf.report_block)
601		return hist_entry__block_fprintf(he, bf, size, fp);
602
603	if (symbol_conf.report_individual_block)
604		return hist_entry__individual_block_fprintf(he, bf, size, fp);
605
606	hist_entry__snprintf(he, &hpp);
607
608	ret = fprintf(fp, "%s\n", bf);
609
610	if (hist_entry__has_callchains(he) && !ignore_callchains)
611		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
612							      0, fp);
613
614	ret += callchain_ret;
615
616	return ret;
617}
618
619static int print_hierarchy_indent(const char *sep, int indent,
620				  const char *line, FILE *fp)
621{
622	int width;
623
624	if (sep != NULL || indent < 2)
625		return 0;
626
627	width = (indent - 2) * HIERARCHY_INDENT;
628
629	return fprintf(fp, "%-*.*s", width, width, line);
630}
631
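/*
 * Hierarchy mode headers: one line with the overhead columns followed by
 * the sort keys joined with " / ", then a line of dots underlining the
 * columns, hence the constant return value of 2.
 */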
632static int hists__fprintf_hierarchy_headers(struct hists *hists,
633					    struct perf_hpp *hpp, FILE *fp)
634{
635	bool first_node, first_col;
636	int indent;
637	int depth;
638	unsigned width = 0;
639	unsigned header_width = 0;
640	struct perf_hpp_fmt *fmt;
641	struct perf_hpp_list_node *fmt_node;
642	const char *sep = symbol_conf.field_sep;
643
644	indent = hists->nr_hpp_node;
645
646	/* preserve max indent depth for column headers */
647	print_hierarchy_indent(sep, indent, " ", fp);
648
649	/* the first hpp_list_node is for overhead columns */
650	fmt_node = list_first_entry(&hists->hpp_formats,
651				    struct perf_hpp_list_node, list);
652
653	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
654		fmt->header(fmt, hpp, hists, 0, NULL);
655		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
656	}
657
658	/* combine sort headers with ' / ' */
659	first_node = true;
660	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
661		if (!first_node)
662			header_width += fprintf(fp, " / ");
663		first_node = false;
664
665		first_col = true;
666		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
667			if (perf_hpp__should_skip(fmt, hists))
668				continue;
669
670			if (!first_col)
671				header_width += fprintf(fp, "+");
672			first_col = false;
673
674			fmt->header(fmt, hpp, hists, 0, NULL);
675
676			header_width += fprintf(fp, "%s", strim(hpp->buf));
677		}
678	}
679
680	fprintf(fp, "\n# ");
681
682	/* preserve max indent depth for initial dots */
683	print_hierarchy_indent(sep, indent, dots, fp);
684
685	/* the first hpp_list_node is for overhead columns */
686	fmt_node = list_first_entry(&hists->hpp_formats,
687				    struct perf_hpp_list_node, list);
688
689	first_col = true;
690	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
691		if (!first_col)
692			fprintf(fp, "%s", sep ?: "..");
693		first_col = false;
694
695		width = fmt->width(fmt, hpp, hists);
696		fprintf(fp, "%.*s", width, dots);
697	}
698
699	depth = 0;
700	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
701		first_col = true;
702		width = depth * HIERARCHY_INDENT;
703
704		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
705			if (perf_hpp__should_skip(fmt, hists))
706				continue;
707
708			if (!first_col)
709				width++;  /* for '+' sign between column header */
710			first_col = false;
711
712			width += fmt->width(fmt, hpp, hists);
713		}
714
715		if (width > header_width)
716			header_width = width;
717
718		depth++;
719	}
720
721	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);
722
723	fprintf(fp, "\n#\n");
724
725	return 2;
726}
727
728static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
729			 int line, FILE *fp)
730{
731	struct perf_hpp_fmt *fmt;
732	const char *sep = symbol_conf.field_sep;
733	bool first = true;
734	int span = 0;
735
736	hists__for_each_format(hists, fmt) {
737		if (perf_hpp__should_skip(fmt, hists))
738			continue;
739
740		if (!first && !span)
741			fprintf(fp, "%s", sep ?: "  ");
742		else
743			first = false;
744
745		fmt->header(fmt, hpp, hists, line, &span);
746
747		if (!span)
748			fprintf(fp, "%s", hpp->buf);
749	}
750}
751
752static int
753hists__fprintf_standard_headers(struct hists *hists,
754				struct perf_hpp *hpp,
755				FILE *fp)
756{
757	struct perf_hpp_list *hpp_list = hists->hpp_list;
758	struct perf_hpp_fmt *fmt;
759	unsigned int width;
760	const char *sep = symbol_conf.field_sep;
761	bool first = true;
762	int line;
763
764	for (line = 0; line < hpp_list->nr_header_lines; line++) {
765		/* first # is displayed one level up */
766		if (line)
767			fprintf(fp, "# ");
768		fprintf_line(hists, hpp, line, fp);
769		fprintf(fp, "\n");
770	}
771
772	if (sep)
773		return hpp_list->nr_header_lines;
774
775	first = true;
776
777	fprintf(fp, "# ");
778
779	hists__for_each_format(hists, fmt) {
780		unsigned int i;
781
782		if (perf_hpp__should_skip(fmt, hists))
783			continue;
784
785		if (!first)
786			fprintf(fp, "%s", sep ?: "  ");
787		else
788			first = false;
789
790		width = fmt->width(fmt, hpp, hists);
791		for (i = 0; i < width; i++)
792			fprintf(fp, ".");
793	}
794
795	fprintf(fp, "\n");
796	fprintf(fp, "#\n");
797	return hpp_list->nr_header_lines + 2;
798}
799
800int hists__fprintf_headers(struct hists *hists, FILE *fp)
801{
802	char bf[1024];
803	struct perf_hpp dummy_hpp = {
804		.buf	= bf,
805		.size	= sizeof(bf),
806	};
807
808	fprintf(fp, "# ");
809
810	if (symbol_conf.report_hierarchy)
811		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
812	else
813		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
814
815}
816
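/*
 * Top-level stdio printer: emit the optional headers and then every
 * unfiltered entry whose percentage is at least min_pcnt, honouring
 * max_rows and max_cols.  The temporary line buffer is sized from the
 * sort list width plus the color escape overhead.
 */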
817size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
818		      int max_cols, float min_pcnt, FILE *fp,
819		      bool ignore_callchains)
820{
821	struct rb_node *nd;
822	size_t ret = 0;
823	const char *sep = symbol_conf.field_sep;
824	int nr_rows = 0;
825	size_t linesz;
826	char *line = NULL;
827	unsigned indent;
828
829	init_rem_hits();
830
831	hists__reset_column_width(hists);
832
833	if (symbol_conf.col_width_list_str)
834		perf_hpp__set_user_width(symbol_conf.col_width_list_str);
835
836	if (show_header)
837		nr_rows += hists__fprintf_headers(hists, fp);
838
839	if (max_rows && nr_rows >= max_rows)
840		goto out;
841
842	linesz = hists__sort_list_width(hists) + 3 + 1;
843	linesz += perf_hpp__color_overhead();
844	line = malloc(linesz);
845	if (line == NULL) {
846		ret = -1;
847		goto out;
848	}
849
850	indent = hists__overhead_width(hists) + 4;
851
852	for (nd = rb_first_cached(&hists->entries); nd;
853	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
854		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
855		float percent;
856
857		if (h->filtered)
858			continue;
859
860		if (symbol_conf.report_individual_block)
861			percent = block_info__total_cycles_percent(h);
862		else
863			percent = hist_entry__get_percent_limit(h);
864
865		if (percent < min_pcnt)
866			continue;
867
868		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);
869
870		if (max_rows && ++nr_rows >= max_rows)
871			break;
872
873		/*
874		 * If all children are filtered out or percent-limited,
875		 * display "no entry >= x.xx%" message.
876		 */
877		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
878			int depth = hists->nr_hpp_node + h->depth + 1;
879
880			print_hierarchy_indent(sep, depth, " ", fp);
881			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
882
883			if (max_rows && ++nr_rows >= max_rows)
884				break;
885		}
886
887		if (h->ms.map == NULL && verbose > 1) {
888			maps__fprintf(thread__maps(h->thread), fp);
889			fprintf(fp, "%.10s end\n", graph_dotted_line);
890		}
891	}
892
893	free(line);
894out:
895	zfree(&rem_sq_bracket);
896
897	return ret;
898}
899
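/*
 * nr_events[0] is used as the overall total, so each record type can also
 * be reported as a percentage of it; counters that are still zero are
 * skipped when symbol_conf.skip_empty is set.
 */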
900size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
901{
902	int i;
903	size_t ret = 0;
904	u32 total = stats->nr_events[0];
905
906	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
907		const char *name;
908
909		name = perf_event__name(i);
910		if (!strcmp(name, "UNKNOWN"))
911			continue;
912		if (symbol_conf.skip_empty && !stats->nr_events[i])
913			continue;
914
915		if (i && total) {
916			ret += fprintf(fp, "%20s events: %10d  (%4.1f%%)\n",
917				       name, stats->nr_events[i],
918				       100.0 * stats->nr_events[i] / total);
919		} else {
920			ret += fprintf(fp, "%20s events: %10d\n",
921				       name, stats->nr_events[i]);
922		}
923	}
924
925	return ret;
926}