// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

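/*
 * Format one value of a hist entry, either as a percentage of the hists'
 * total period or as a raw number.  For event groups, one column is
 * printed per group member, zero-filling members that have no sample.
 */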
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

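/*
 * Common entry point for the per-column entry/color callbacks: pick the
 * column width (user override or built-in default), reserve room for the
 * decoration (a space plus '%' for percent columns, a space otherwise)
 * unless a field separator is in use, then format via __hpp__fmt().
 */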
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

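/*
 * Collect the group members' values of two entries into arrays indexed
 * by group index so that they can be compared member by member.
 * Returns -1 (and leaves the arrays NULL) on allocation failure.
 */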
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

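/*
 * Default comparison for hpp columns: compare the group leader's value
 * first and, for event groups, break ties by comparing the remaining
 * members in group-index order.  With --group-sort-idx the requested
 * member is compared first instead (see __hpp__group_sort_idx()).
 */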
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

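/*
 * Comparison for accumulated ("Children") columns: compare the
 * accumulated period first; on a tie between entries of the same thread,
 * fall back to callchain depth so the caller is put above the callee
 * (the direction is flipped for ORDER_CALLER).
 */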
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

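/*
 * The __HPP_*_FN() macros below generate the he_get_*() field accessors
 * and the hpp__color_*/hpp__entry_*/hpp__sort_* callbacks referenced by
 * perf_hpp__format[]; the HPP_*_FNS() wrappers instantiate them per column.
 */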
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

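/*
 * Register the default output columns, unless the user gave an explicit
 * field order.  "Overhead" is always part of the defaults (renamed to
 * "Self" when callchain cumulation adds a "Children" column); the other
 * columns depend on symbol_conf switches.
 */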
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

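/*
 * Undo the effect of callchain cumulation on the default columns: drop
 * the "Children" column and rename the "Self" column back to "Overhead".
 */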
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

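/*
 * Reset a column to its built-in default width.  Sort entries delegate to
 * perf_hpp__reset_sort_width() and dynamic entries are left untouched.
 */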
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

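/*
 * In hierarchy mode each sort level keeps its own hpp list.  Duplicate
 * the format into the node for its level, creating the node on first use,
 * and clear the node's skip flag as soon as one format at that level is
 * not skipped.
 */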
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

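/*
 * Build the per-hists hierarchy format lists from the sort list of each
 * evsel's hists.  Only used with --hierarchy; dynamic entries that are
 * not defined for a given hists are skipped.
 */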
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}