// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

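/*
 * Call the given print callback and advance the hpp buffer by however
 * many bytes it wrote, so successive fields are laid out back to back
 * in the same buffer.
 */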
#define hpp__call_print_fn(hpp, fn, fmt, ...) \
({ \
	int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
	advance_hpp(hpp, __ret); \
	__ret; \
})

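/*
 * Print a single value in the style requested by @fmtype: as a percentage
 * of the hists' total period, as an average per sample, or as a raw number.
 */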
static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
			    int nr_samples, const char *fmt, int len,
			    hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * val / total;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
		double avg = nr_samples ? (1.0 * val / nr_samples) : 0;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
	}

	return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
}

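/*
 * One slot per event in a group: the hists the value belongs to, the
 * field value itself, and the number of samples behind it.
 */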
struct hpp_fmt_value {
	struct hists *hists;
	u64 val;
	int samples;
};

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i = 0, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	values[0].hists = evsel__hists(evsel);
	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		for_each_group_member(pos, evsel)
			values[++i].hists = evsel__hists(pos);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmtype);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

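/*
 * Collect the field value of every group member for both entries into
 * freshly allocated arrays indexed by group index, so that the two
 * entries can be compared member by member.
 */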
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

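/*
 * Compare two entries by the group member selected via
 * symbol_conf.group_sort_idx, using the remaining members only to break
 * ties, and falling back to the leader's own value when the entry is not
 * a group or the index is out of range.
 */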
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

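/*
 * Column width: at least the user supplied (or default) field width,
 * widened to cover every group member that will actually be printed, and
 * never narrower than the column header text.
 */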
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group) {
		int nr = 0;
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (!symbol_conf.skip_empty ||
			    evsel__hists(pos)->stats.nr_samples)
				nr++;
		}

		len = max(len, nr * fmt->len);
	}

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

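/*
 * The percent printers pass (int len, double percent) as varargs; pull
 * them off here and let percent_color_len_snprintf() choose the color.
 * The return value is clamped so callers never advance past the buffer.
 */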
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

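/*
 * The macros below stamp out the per-column getter, color, entry and sort
 * callbacks for each PERF_HPP__* column from a hist_entry stat field.
 */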
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
	return he->stat._field; \
} \
 \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
			hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \
}

#define __HPP_SORT_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{ \
	return __hpp__sort(a, b, he_get_##_field); \
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
static u64 he_get_acc_##_field(struct hist_entry *he) \
{ \
	return he->stat_acc->_field; \
} \
 \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT); \
}

#define __HPP_SORT_ACC_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{ \
	return __hpp__sort_acc(a, b, he_get_acc_##_field); \
}

#define __HPP_ENTRY_RAW_FN(_type, _field) \
static u64 he_get_raw_##_field(struct hist_entry *he) \
{ \
	return he->stat._field; \
} \
 \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW); \
}

#define __HPP_SORT_RAW_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{ \
	return __hpp__sort(a, b, he_get_raw_##_field); \
}

#define __HPP_ENTRY_AVERAGE_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
	return he->stat._field; \
} \
 \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
			      struct perf_hpp *hpp, struct hist_entry *he) \
{ \
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f", \
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE); \
}

#define __HPP_SORT_AVERAGE_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{ \
	return __hpp__sort(a, b, he_get_##_field); \
}


#define HPP_PERCENT_FNS(_type, _field) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
__HPP_ENTRY_PERCENT_FN(_type, _field) \
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field) \
__HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field) \
__HPP_ENTRY_RAW_FN(_type, _field) \
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field) \
__HPP_ENTRY_AVERAGE_FN(_type, _field) \
__HPP_SORT_AVERAGE_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \
	{ \
		.name = _name, \
		.header = hpp__header_fn, \
		.width = hpp__width_fn, \
		.color = hpp__color_ ## _fn, \
		.entry = hpp__entry_ ## _fn, \
		.cmp = hpp__nop_cmp, \
		.collapse = hpp__nop_cmp, \
		.sort = hpp__sort_ ## _fn, \
		.idx = PERF_HPP__ ## _idx, \
		.equal = hpp__equal, \
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \
	{ \
		.name = _name, \
		.header = hpp__header_fn, \
		.width = hpp__width_fn, \
		.color = hpp__color_ ## _fn, \
		.entry = hpp__entry_ ## _fn, \
		.cmp = hpp__nop_cmp, \
		.collapse = hpp__nop_cmp, \
		.sort = hpp__sort_ ## _fn, \
		.idx = PERF_HPP__ ## _idx, \
		.equal = hpp__equal, \
	}

#define HPP__PRINT_FNS(_name, _fn, _idx) \
	{ \
		.name = _name, \
		.header = hpp__header_fn, \
		.width = hpp__width_fn, \
		.entry = hpp__entry_ ## _fn, \
		.cmp = hpp__nop_cmp, \
		.collapse = hpp__nop_cmp, \
		.sort = hpp__sort_ ## _fn, \
		.idx = PERF_HPP__ ## _idx, \
		.equal = hpp__equal, \
	}

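/*
 * Built-in columns, indexed by the PERF_HPP__* enum values so that
 * perf_hpp__init() and perf_hpp__cancel_cumulate() can address them
 * directly.
 */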
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};

struct perf_hpp_list perf_hpp_list = {
	.fields = LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts = LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

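/*
 * Undo the cumulate setup done in perf_hpp__init(): drop the "Children"
 * column again and give the overhead column its plain "Overhead" name
 * back.
 */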
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	case PERF_HPP__WEIGHT1:
	case PERF_HPP__WEIGHT2:
	case PERF_HPP__WEIGHT3:
		fmt->len = 8;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

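/*
 * In hierarchy mode each sort level has its own perf_hpp_list_node on
 * hists->hpp_formats; duplicate @fmt into the node for its level,
 * allocating the node on first use.
 */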
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}