Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4 *
5 * Handle the callchains from the stream in an ad-hoc radix tree and then
6 * sort them in an rbtree.
7 *
8 * Using a radix for code path provides a fast retrieval and factorizes
9 * memory use. Also that lets us use the paths in a hierarchical graph view.
10 *
11 */
12
13#include <inttypes.h>
14#include <stdlib.h>
15#include <stdio.h>
16#include <stdbool.h>
17#include <errno.h>
18#include <math.h>
19#include <linux/string.h>
20#include <linux/zalloc.h>
21
22#include "asm/bug.h"
23
24#include "debug.h"
25#include "dso.h"
26#include "event.h"
27#include "hist.h"
28#include "sort.h"
29#include "machine.h"
30#include "map.h"
31#include "callchain.h"
32#include "branch.h"
33#include "symbol.h"
34#include "../perf.h"
35
/*
 * Default callchain settings shared by callchain_param and
 * callchain_param_default: absolute graph mode, 0.5% minimum percent
 * filter, callee-first order, function-keyed aggregation, values shown
 * as percentages.
 */
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* Live global parameters; mutated by the option/config parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};
46
/*
 * Are there any events using DWARF callchains?
 *
 * I.e.
 *
 * -e cycles/call-graph=dwarf/
 */
bool dwarf_callchain_users;

/* Pristine copy of the defaults, for code that needs to reset callchain_param. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used while resolving and appending one sample's callchain. */
__thread struct callchain_cursor callchain_cursor;
61
/* Parse a record-side --call-graph argument (e.g. "fp", "dwarf,4096", "lbr") into @param. */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
66
67static int parse_callchain_mode(const char *value)
68{
69 if (!strncmp(value, "graph", strlen(value))) {
70 callchain_param.mode = CHAIN_GRAPH_ABS;
71 return 0;
72 }
73 if (!strncmp(value, "flat", strlen(value))) {
74 callchain_param.mode = CHAIN_FLAT;
75 return 0;
76 }
77 if (!strncmp(value, "fractal", strlen(value))) {
78 callchain_param.mode = CHAIN_GRAPH_REL;
79 return 0;
80 }
81 if (!strncmp(value, "folded", strlen(value))) {
82 callchain_param.mode = CHAIN_FOLDED;
83 return 0;
84 }
85 return -1;
86}
87
88static int parse_callchain_order(const char *value)
89{
90 if (!strncmp(value, "caller", strlen(value))) {
91 callchain_param.order = ORDER_CALLER;
92 callchain_param.order_set = true;
93 return 0;
94 }
95 if (!strncmp(value, "callee", strlen(value))) {
96 callchain_param.order = ORDER_CALLEE;
97 callchain_param.order_set = true;
98 return 0;
99 }
100 return -1;
101}
102
103static int parse_callchain_sort_key(const char *value)
104{
105 if (!strncmp(value, "function", strlen(value))) {
106 callchain_param.key = CCKEY_FUNCTION;
107 return 0;
108 }
109 if (!strncmp(value, "address", strlen(value))) {
110 callchain_param.key = CCKEY_ADDRESS;
111 return 0;
112 }
113 if (!strncmp(value, "srcline", strlen(value))) {
114 callchain_param.key = CCKEY_SRCLINE;
115 return 0;
116 }
117 if (!strncmp(value, "branch", strlen(value))) {
118 callchain_param.branch_callstack = 1;
119 return 0;
120 }
121 return -1;
122}
123
124static int parse_callchain_value(const char *value)
125{
126 if (!strncmp(value, "percent", strlen(value))) {
127 callchain_param.value = CCVAL_PERCENT;
128 return 0;
129 }
130 if (!strncmp(value, "period", strlen(value))) {
131 callchain_param.value = CCVAL_PERIOD;
132 return 0;
133 }
134 if (!strncmp(value, "count", strlen(value))) {
135 callchain_param.value = CCVAL_COUNT;
136 return 0;
137 }
138 return -1;
139}
140
141static int get_stack_size(const char *str, unsigned long *_size)
142{
143 char *endptr;
144 unsigned long size;
145 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
146
147 size = strtoul(str, &endptr, 0);
148
149 do {
150 if (*endptr)
151 break;
152
153 size = round_up(size, sizeof(u64));
154 if (!size || size > max_size)
155 break;
156
157 *_size = size;
158 return 0;
159
160 } while (0);
161
162 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
163 max_size, str);
164 return -1;
165}
166
/*
 * Parse the report-side callchain options in @arg, a comma separated list
 * that may mix print mode, order, sort key, value type, a minimum percent
 * threshold and a print limit, e.g. "graph,0.5,caller".
 *
 * When @allow_record_opt is true (perf top), record options such as
 * "dwarf,8192" are also accepted, a number right after "dwarf" being
 * treated as the stack dump size.
 *
 * Returns 0 on success, -1 on a malformed option.
 *
 * NOTE(review): strtok_r() writes NULs into the buffer, so the
 * (char *)arg cast means the caller's string is modified in place —
 * confirm every caller passes a writable, discardable string.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		arg = NULL;	/* subsequent strtok_r() calls continue the same scan */
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
239
/* Parse -g/--call-graph for perf report: record options are not accepted. */
int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}

/* Parse --call-graph for perf top: record options (e.g. "dwarf") are allowed too. */
int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
249
250int parse_callchain_record(const char *arg, struct callchain_param *param)
251{
252 char *tok, *name, *saveptr = NULL;
253 char *buf;
254 int ret = -1;
255
256 /* We need buffer that we know we can write to. */
257 buf = malloc(strlen(arg) + 1);
258 if (!buf)
259 return -ENOMEM;
260
261 strcpy(buf, arg);
262
263 tok = strtok_r((char *)buf, ",", &saveptr);
264 name = tok ? : (char *)buf;
265
266 do {
267 /* Framepointer style */
268 if (!strncmp(name, "fp", sizeof("fp"))) {
269 if (!strtok_r(NULL, ",", &saveptr)) {
270 param->record_mode = CALLCHAIN_FP;
271 ret = 0;
272 } else
273 pr_err("callchain: No more arguments "
274 "needed for --call-graph fp\n");
275 break;
276
277 /* Dwarf style */
278 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
279 const unsigned long default_stack_dump_size = 8192;
280
281 ret = 0;
282 param->record_mode = CALLCHAIN_DWARF;
283 param->dump_size = default_stack_dump_size;
284 dwarf_callchain_users = true;
285
286 tok = strtok_r(NULL, ",", &saveptr);
287 if (tok) {
288 unsigned long size = 0;
289
290 ret = get_stack_size(tok, &size);
291 param->dump_size = size;
292 }
293 } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
294 if (!strtok_r(NULL, ",", &saveptr)) {
295 param->record_mode = CALLCHAIN_LBR;
296 ret = 0;
297 } else
298 pr_err("callchain: No more arguments "
299 "needed for --call-graph lbr\n");
300 break;
301 } else {
302 pr_err("callchain: Unknown --call-graph option "
303 "value: %s\n", arg);
304 break;
305 }
306
307 } while (0);
308
309 free(buf);
310 return ret;
311}
312
/*
 * Handle the "call-graph.*" perf config variables, updating the global
 * callchain_param accordingly.
 *
 * Returns 0 for variables outside the "call-graph." namespace and on
 * success, a negative value on a malformed setting.
 */
int perf_callchain_config(const char *var, const char *value)
{
	char *endptr;

	if (!strstarts(var, "call-graph."))
		return 0;
	/* skip the "call-graph." prefix, leaving just the key name */
	var += sizeof("call-graph.") - 1;

	if (!strcmp(var, "record-mode"))
		return parse_callchain_record_opt(value, &callchain_param);
	if (!strcmp(var, "dump-size")) {
		unsigned long size = 0;
		int ret;

		ret = get_stack_size(value, &size);
		callchain_param.dump_size = size;

		return ret;
	}
	if (!strcmp(var, "print-type")){
		int ret;
		ret = parse_callchain_mode(value);
		if (ret == -1)
			pr_err("Invalid callchain mode: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "order")){
		int ret;
		ret = parse_callchain_order(value);
		if (ret == -1)
			pr_err("Invalid callchain order: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "sort-key")){
		int ret;
		ret = parse_callchain_sort_key(value);
		if (ret == -1)
			pr_err("Invalid callchain sort key: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "threshold")) {
		callchain_param.min_percent = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain threshold: %s\n", value);
			return -1;
		}
	}
	if (!strcmp(var, "print-limit")) {
		/*
		 * NOTE(review): print_limit is an integer but is parsed with
		 * strtod — works for plain decimal numbers; confirm this is
		 * intended rather than strtoul.
		 */
		callchain_param.print_limit = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain print limit: %s\n", value);
			return -1;
		}
	}

	return 0;
}
370
/*
 * Insert @chain into the sorted rbtree at @root.
 *
 * The sort key depends on @mode: flat/folded order by the node's own hit
 * count, the graph modes by cumulative hits (own + children).  Larger
 * values descend to the left, so an in-order walk yields hottest first.
 */
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}
411
412static void
413__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
414 u64 min_hit)
415{
416 struct rb_node *n;
417 struct callchain_node *child;
418
419 n = rb_first(&node->rb_root_in);
420 while (n) {
421 child = rb_entry(n, struct callchain_node, rb_node_in);
422 n = rb_next(n);
423
424 __sort_chain_flat(rb_root, child, min_hit);
425 }
426
427 if (node->hit && node->hit >= min_hit)
428 rb_insert_callchain(rb_root, node, CHAIN_FLAT);
429}
430
/*
 * Once we get every callchains from the stream, we can now
 * sort them by hit
 */
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
		u64 min_hit, struct callchain_param *param __maybe_unused)
{
	/* start from an empty tree; output nodes link via rb_node, not rb_node_in */
	*rb_root = RB_ROOT;
	__sort_chain_flat(rb_root, &root->node, min_hit);
}
442
443static void __sort_chain_graph_abs(struct callchain_node *node,
444 u64 min_hit)
445{
446 struct rb_node *n;
447 struct callchain_node *child;
448
449 node->rb_root = RB_ROOT;
450 n = rb_first(&node->rb_root_in);
451
452 while (n) {
453 child = rb_entry(n, struct callchain_node, rb_node_in);
454 n = rb_next(n);
455
456 __sort_chain_graph_abs(child, min_hit);
457 if (callchain_cumul_hits(child) >= min_hit)
458 rb_insert_callchain(&node->rb_root, child,
459 CHAIN_GRAPH_ABS);
460 }
461}
462
/* Sort the whole tree by absolute cumulative hits, pruning below @min_hit. */
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	/* expose the root node's sorted subtree as the result */
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
470
471static void __sort_chain_graph_rel(struct callchain_node *node,
472 double min_percent)
473{
474 struct rb_node *n;
475 struct callchain_node *child;
476 u64 min_hit;
477
478 node->rb_root = RB_ROOT;
479 min_hit = ceil(node->children_hit * min_percent);
480
481 n = rb_first(&node->rb_root_in);
482 while (n) {
483 child = rb_entry(n, struct callchain_node, rb_node_in);
484 n = rb_next(n);
485
486 __sort_chain_graph_rel(child, min_percent);
487 if (callchain_cumul_hits(child) >= min_hit)
488 rb_insert_callchain(&node->rb_root, child,
489 CHAIN_GRAPH_REL);
490 }
491}
492
/* Sort the whole tree by relative cumulative hits (param->min_percent per level). */
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	/* expose the root node's sorted subtree as the result */
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
500
501int callchain_register_param(struct callchain_param *param)
502{
503 switch (param->mode) {
504 case CHAIN_GRAPH_ABS:
505 param->sort = sort_chain_graph_abs;
506 break;
507 case CHAIN_GRAPH_REL:
508 param->sort = sort_chain_graph_rel;
509 break;
510 case CHAIN_FLAT:
511 case CHAIN_FOLDED:
512 param->sort = sort_chain_flat;
513 break;
514 case CHAIN_NONE:
515 default:
516 return -1;
517 }
518 return 0;
519}
520
/*
 * Create a child for a parent. If inherit_children, then the new child
 * will become the new parent of its parent's children
 */
static struct callchain_node *
create_child(struct callchain_node *parent, bool inherit_children)
{
	struct callchain_node *new;

	new = zalloc(sizeof(*new));
	if (!new) {
		perror("not enough memory to create child for code path tree");
		return NULL;
	}
	new->parent = parent;
	INIT_LIST_HEAD(&new->val);
	INIT_LIST_HEAD(&new->parent_val);

	if (inherit_children) {
		struct rb_node *n;
		struct callchain_node *child;

		/* steal the parent's whole child tree, then reparent each node */
		new->rb_root_in = parent->rb_root_in;
		parent->rb_root_in = RB_ROOT;

		n = rb_first(&new->rb_root_in);
		while (n) {
			child = rb_entry(n, struct callchain_node, rb_node_in);
			child->parent = new;
			n = rb_next(n);
		}

		/* make it the first child */
		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	}

	return new;
}
560
561
/*
 * Fill the node with callchain values
 *
 * Consumes the remaining cursor entries (from cursor->pos to cursor->nr),
 * turning each one into a callchain_list appended to node->val, taking a
 * reference on the entry's map and accumulating branch statistics when
 * the entry carries branch information.
 *
 * Returns 0 on success, -1 on allocation failure (already-appended
 * entries are left on node->val for the caller to clean up).
 */
static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
	struct callchain_cursor_node *cursor_node;

	node->val_nr = cursor->nr - cursor->pos;
	if (!node->val_nr)
		pr_warning("Warning: empty node in callchain tree\n");

	cursor_node = callchain_cursor_current(cursor);

	while (cursor_node) {
		struct callchain_list *call;

		call = zalloc(sizeof(*call));
		if (!call) {
			perror("not enough memory for the code path tree");
			return -1;
		}
		call->ip = cursor_node->ip;
		call->ms.sym = cursor_node->sym;
		call->ms.map = map__get(cursor_node->map);
		call->srcline = cursor_node->srcline;

		if (cursor_node->branch) {
			call->branch_count = 1;

			if (cursor_node->branch_from) {
				/*
				 * branch_from is set with value somewhere else
				 * to imply it's "to" of a branch.
				 */
				call->brtype_stat.branch_to = true;

				if (cursor_node->branch_flags.predicted)
					call->predicted_count = 1;

				if (cursor_node->branch_flags.abort)
					call->abort_count = 1;

				branch_type_count(&call->brtype_stat,
						  &cursor_node->branch_flags,
						  cursor_node->branch_from,
						  cursor_node->ip);
			} else {
				/*
				 * It's "from" of a branch
				 */
				call->brtype_stat.branch_to = false;
				call->cycles_count =
					cursor_node->branch_flags.cycles;
				call->iter_count = cursor_node->nr_loop_iter;
				call->iter_cycles = cursor_node->iter_cycles;
			}
		}

		list_add_tail(&call->list, &node->val);

		callchain_cursor_advance(cursor);
		cursor_node = callchain_cursor_current(cursor);
	}
	return 0;
}
628
/*
 * Allocate a new child of @parent and fill it with the remaining cursor
 * entries, crediting it with @period hits of its own.
 *
 * Returns the new node, or NULL on allocation failure (in which case any
 * partially filled lists are released here).
 */
static struct callchain_node *
add_child(struct callchain_node *parent,
	  struct callchain_cursor *cursor,
	  u64 period)
{
	struct callchain_node *new;

	new = create_child(parent, false);
	if (new == NULL)
		return NULL;

	if (fill_node(new, cursor) < 0) {
		struct callchain_list *call, *tmp;

		/* undo the partial fill: drop map references and free entries */
		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del_init(&call->list);
			map__zput(call->ms.map);
			free(call);
		}
		free(new);
		return NULL;
	}

	new->children_hit = 0;
	new->hit = period;
	new->children_count = 0;
	new->count = 1;
	return new;
}
658
enum match_result {
	MATCH_ERROR = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};

/*
 * Three-way comparison of two possibly-NULL strings.
 *
 * A NULL string compares greater than a non-NULL one; two NULL strings
 * cannot be ordered and yield MATCH_ERROR.
 */
static enum match_result match_chain_strings(const char *left,
					     const char *right)
{
	int cmp;

	if (!left && !right)
		return MATCH_ERROR;
	if (!left)
		return MATCH_GT;
	if (!right)
		return MATCH_LT;

	cmp = strcmp(left, right);
	if (cmp < 0)
		return MATCH_LT;
	if (cmp > 0)
		return MATCH_GT;
	return MATCH_EQ;
}
686
687/*
688 * We need to always use relative addresses because we're aggregating
689 * callchains from multiple threads, i.e. different address spaces, so
690 * comparing absolute addresses make no sense as a symbol in a DSO may end up
691 * in a different address when used in a different binary or even the same
692 * binary but with some sort of address randomization technique, thus we need
693 * to compare just relative addresses. -acme
694 */
695static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
696 struct map *right_map, u64 right_ip)
697{
698 struct dso *left_dso = left_map ? left_map->dso : NULL;
699 struct dso *right_dso = right_map ? right_map->dso : NULL;
700
701 if (left_dso != right_dso)
702 return left_dso < right_dso ? MATCH_LT : MATCH_GT;
703
704 if (left_ip != right_ip)
705 return left_ip < right_ip ? MATCH_LT : MATCH_GT;
706
707 return MATCH_EQ;
708}
709
/*
 * Compare a cursor entry against a node's callchain_list entry using the
 * configured sort key (srcline, function or address), falling through to
 * the next key whenever the current one cannot decide (MATCH_ERROR).
 *
 * On an exact match (MATCH_EQ), branch statistics carried by @node are
 * also accumulated into @cnode as a side effect.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->map, node->sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
		break;
	}

	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
782
/*
 * Split the parent in two parts (a new child is created) and
 * give a part of its callchain to the created child.
 * Then create another child to host the given callchain of new branch
 *
 * @to_split: first entry of parent->val that moves to the new child
 * @idx_parents: number of entries matched in the ancestors
 * @idx_local: number of entries matched inside this node
 * @period: hits to credit to the newly appended chain
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static int
split_add_child(struct callchain_node *parent,
		struct callchain_cursor *cursor,
		struct callchain_list *to_split,
		u64 idx_parents, u64 idx_local, u64 period)
{
	struct callchain_node *new;
	struct list_head *old_tail;
	unsigned int idx_total = idx_parents + idx_local;

	/* split */
	new = create_child(parent, true);
	if (new == NULL)
		return -1;

	/* split the callchain and move a part to the new child */
	old_tail = parent->val.prev;
	list_del_range(&to_split->list, old_tail);
	new->val.next = &to_split->list;
	new->val.prev = old_tail;
	to_split->list.prev = &new->val;
	old_tail->next = &new->val;

	/* split the hits */
	new->hit = parent->hit;
	new->children_hit = parent->children_hit;
	parent->children_hit = callchain_cumul_hits(new);
	new->val_nr = parent->val_nr - idx_local;
	parent->val_nr = idx_local;
	new->count = parent->count;
	new->children_count = parent->children_count;
	parent->children_count = callchain_cumul_counts(new);

	/* create a new child for the new branch if any */
	if (idx_total < cursor->nr) {
		struct callchain_node *first;
		struct callchain_list *cnode;
		struct callchain_cursor_node *node;
		struct rb_node *p, **pp;

		/* parent becomes a pure interior node: hits move to children */
		parent->hit = 0;
		parent->children_hit += period;
		parent->count = 0;
		parent->children_count += 1;

		node = callchain_cursor_current(cursor);
		new = add_child(parent, cursor, period);
		if (new == NULL)
			return -1;

		/*
		 * This is second child since we moved parent's children
		 * to new (first) child above.
		 */
		p = parent->rb_root_in.rb_node;
		first = rb_entry(p, struct callchain_node, rb_node_in);
		cnode = list_first_entry(&first->val, struct callchain_list,
					 list);

		if (match_chain(node, cnode) == MATCH_LT)
			pp = &p->rb_left;
		else
			pp = &p->rb_right;

		rb_link_node(&new->rb_node_in, p, pp);
		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
	} else {
		/* the whole cursor matched: the hits stay on the parent */
		parent->hit = period;
		parent->count = 1;
	}
	return 0;
}
860
861static enum match_result
862append_chain(struct callchain_node *root,
863 struct callchain_cursor *cursor,
864 u64 period);
865
/*
 * Append the remaining cursor entries below @root: descend into the child
 * whose first entry matches, or add a brand new child when none does.
 *
 * Returns 0 on success, -1 on error (empty cursor or allocation failure).
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in children */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least the first entry matches, delegate to that child */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
912
/*
 * Match the cursor against @root's own callchain entries, then either
 * split the node (partial match), bump its hit count (full match of both
 * node and cursor) or recurse into the children (node fully matched,
 * cursor entries remain).
 *
 * Returns MATCH_EQ when the chain was appended somewhere under @root,
 * MATCH_LT/MATCH_GT when the first entry does not belong here (so the
 * caller knows which way to walk its rbtree) and MATCH_ERROR on failure.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* no match at all: relay the comparison result to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
976
/*
 * Merge the callchain currently held in @cursor into the tree at @root,
 * crediting it with @period hits, and track the deepest chain seen.
 *
 * Returns 0 on success (including the trivial empty-cursor case), -1 on
 * failure.
 */
int callchain_append(struct callchain_root *root,
		     struct callchain_cursor *cursor,
		     u64 period)
{
	if (!cursor->nr)
		return 0;

	/* rewind the cursor to its first entry before walking the tree */
	callchain_cursor_commit(cursor);

	if (append_chain_children(&root->node, cursor, period) < 0)
		return -1;

	if (cursor->nr > root->max_depth)
		root->max_depth = cursor->nr;

	return 0;
}
994
/*
 * Recursively move every callchain under @src into @dst: each source
 * node's entries are replayed through @cursor, re-appended to @dst with
 * the node's hit count, then the source lists and nodes are freed.
 *
 * The cursor's position and tail pointer are saved and restored so the
 * caller's cursor state is preserved.  Returns 0 on success, -1 on
 * allocation failure.
 *
 * NOTE(review): the callchain_cursor_append() return value is ignored
 * here — on ENOMEM the replayed chain would be silently truncated;
 * confirm whether that is acceptable.
 */
static int
merge_chain_branch(struct callchain_cursor *cursor,
		   struct callchain_node *dst, struct callchain_node *src)
{
	struct callchain_cursor_node **old_last = cursor->last;
	struct callchain_node *child;
	struct callchain_list *list, *next_list;
	struct rb_node *n;
	int old_pos = cursor->nr;
	int err = 0;

	list_for_each_entry_safe(list, next_list, &src->val, list) {
		callchain_cursor_append(cursor, list->ip,
					list->ms.map, list->ms.sym,
					false, NULL, 0, 0, 0, list->srcline);
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	if (src->hit) {
		callchain_cursor_commit(cursor);
		if (append_chain_children(dst, cursor, src->hit) < 0)
			return -1;
	}

	n = rb_first(&src->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &src->rb_root_in);

		err = merge_chain_branch(cursor, dst, child);
		if (err)
			break;

		free(child);
	}

	/* restore the caller's cursor state */
	cursor->nr = old_pos;
	cursor->last = old_last;

	return err;
}
1039
/* Merge all chains of @src into @dst using @cursor as scratch space. */
int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src)
{
	return merge_chain_branch(cursor, &dst->node, &src->node);
}
1045
/*
 * Append one resolved frame to @cursor, recycling a previously allocated
 * node past cursor->last when available, otherwise growing the list.
 *
 * Takes a reference on @map (dropping any reference held by a recycled
 * node) and copies @flags when non-NULL.  Returns 0 on success, -ENOMEM
 * when a new node cannot be allocated.
 */
int callchain_cursor_append(struct callchain_cursor *cursor,
			    u64 ip, struct map *map, struct symbol *sym,
			    bool branch, struct branch_flags *flags,
			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
			    const char *srcline)
{
	struct callchain_cursor_node *node = *cursor->last;

	if (!node) {
		node = calloc(1, sizeof(*node));
		if (!node)
			return -ENOMEM;

		*cursor->last = node;
	}

	node->ip = ip;
	/* drop the recycled node's map reference before taking a new one */
	map__zput(node->map);
	node->map = map__get(map);
	node->sym = sym;
	node->branch = branch;
	node->nr_loop_iter = nr_loop_iter;
	node->iter_cycles = iter_cycles;
	node->srcline = srcline;

	if (flags)
		memcpy(&node->branch_flags, flags,
			sizeof(struct branch_flags));

	node->branch_from = branch_from;
	cursor->nr++;

	cursor->last = &node->next;

	return 0;
}
1082
/*
 * Resolve @sample's callchain into @cursor when any consumer (callchains,
 * cumulative callchains, parent symbol or branch flag counting) is
 * enabled.  Returns 0 when there is nothing to do, otherwise the result
 * of thread__resolve_callchain().
 */
int sample__resolve_callchain(struct perf_sample *sample,
			      struct callchain_cursor *cursor, struct symbol **parent,
			      struct evsel *evsel, struct addr_location *al,
			      int max_stack)
{
	if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
		return 0;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
	    perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
						 parent, al, max_stack);
	}
	return 0;
}
1098
/*
 * Append the thread-local cursor's chain to @he's callchain tree with the
 * sample's period, unless callchains are unused for this sample.
 */
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
{
	if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
		!symbol_conf.show_branchflag_count)
		return 0;
	return callchain_append(he->callchain, &callchain_cursor, sample->period);
}
1106
/*
 * Populate @al from a cursor node and classify the address's cpumode and
 * level character (kernel/guest/user/hypervisor).
 *
 * Returns 1 when the entry should be used, 0 when it is unresolved and
 * @hide_unresolved asked for such entries to be dropped.  Note that with
 * no symbol and no map the cpumode/level fields are left untouched.
 */
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->map = node->map;
	al->sym = node->sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		if (al->map == NULL)
			goto out;
	}

	/* map in the kernel map group => kernel or guest kernel address */
	if (al->map->groups == &al->machine->kmaps) {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		if (machine__is_host(al->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}
1146
/*
 * Format a callchain entry's name into @bf: the symbol name (with srcline
 * and "(inlined)" markers where applicable) or, with no symbol, the raw
 * IP.  Optionally appends the DSO short name.  Returns @bf.
 */
char *callchain_list__sym_name(struct callchain_list *cl,
			       char *bf, size_t bfsize, bool show_dso)
{
	bool show_addr = callchain_param.key == CCKEY_ADDRESS;
	bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
	int printed;

	if (cl->ms.sym) {
		const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";

		if (show_srcline && cl->srcline)
			printed = scnprintf(bf, bfsize, "%s %s%s",
					    cl->ms.sym->name, cl->srcline,
					    inlined);
		else
			printed = scnprintf(bf, bfsize, "%s%s",
					    cl->ms.sym->name, inlined);
	} else
		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);

	if (show_dso)
		scnprintf(bf + printed, bfsize - printed, " %s",
			  cl->ms.map ?
			  cl->ms.map->dso->short_name :
			  "unknown");

	return bf;
}
1175
1176char *callchain_node__scnprintf_value(struct callchain_node *node,
1177 char *bf, size_t bfsize, u64 total)
1178{
1179 double percent = 0.0;
1180 u64 period = callchain_cumul_hits(node);
1181 unsigned count = callchain_cumul_counts(node);
1182
1183 if (callchain_param.mode == CHAIN_FOLDED) {
1184 period = node->hit;
1185 count = node->count;
1186 }
1187
1188 switch (callchain_param.value) {
1189 case CCVAL_PERIOD:
1190 scnprintf(bf, bfsize, "%"PRIu64, period);
1191 break;
1192 case CCVAL_COUNT:
1193 scnprintf(bf, bfsize, "%u", count);
1194 break;
1195 case CCVAL_PERCENT:
1196 default:
1197 if (total)
1198 percent = period * 100.0 / total;
1199 scnprintf(bf, bfsize, "%.2f%%", percent);
1200 break;
1201 }
1202 return bf;
1203}
1204
/*
 * Print the node's value (period, count or percentage, according to
 * callchain_param.value) to @fp and return the number of characters
 * printed.  Percentages go through percent_color_fprintf().
 */
int callchain_node__fprintf_value(struct callchain_node *node,
				 FILE *fp, u64 total)
{
	double percent = 0.0;
	u64 period = callchain_cumul_hits(node);
	unsigned count = callchain_cumul_counts(node);

	if (callchain_param.mode == CHAIN_FOLDED) {
		/* folded mode prints each line's own hits, not cumulative ones */
		period = node->hit;
		count = node->count;
	}

	switch (callchain_param.value) {
	case CCVAL_PERIOD:
		return fprintf(fp, "%"PRIu64, period);
	case CCVAL_COUNT:
		return fprintf(fp, "%u", count);
	case CCVAL_PERCENT:
	default:
		if (total)
			percent = period * 100.0 / total;
		return percent_color_fprintf(fp, "%.2f%%", percent);
	}
	return 0;	/* not reached: every case above returns */
}
1230
1231static void callchain_counts_value(struct callchain_node *node,
1232 u64 *branch_count, u64 *predicted_count,
1233 u64 *abort_count, u64 *cycles_count)
1234{
1235 struct callchain_list *clist;
1236
1237 list_for_each_entry(clist, &node->val, list) {
1238 if (branch_count)
1239 *branch_count += clist->branch_count;
1240
1241 if (predicted_count)
1242 *predicted_count += clist->predicted_count;
1243
1244 if (abort_count)
1245 *abort_count += clist->abort_count;
1246
1247 if (cycles_count)
1248 *cycles_count += clist->cycles_count;
1249 }
1250}
1251
/*
 * Recursively sum the branch statistics of @node's whole subtree into the
 * output counters (each of which may be NULL).  Note that @node's own
 * values are NOT added here - the caller (or the parent's recursion step)
 * is responsible for that; only children are visited.  Always returns 0.
 */
static int callchain_node_branch_counts_cumul(struct callchain_node *node,
					      u64 *branch_count,
					      u64 *predicted_count,
					      u64 *abort_count,
					      u64 *cycles_count)
{
	struct callchain_node *child;
	struct rb_node *n;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		/* descend first, then fold the child's own counters in */
		callchain_node_branch_counts_cumul(child, branch_count,
						   predicted_count,
						   abort_count,
						   cycles_count);

		callchain_counts_value(child, branch_count,
				       predicted_count, abort_count,
				       cycles_count);
	}

	return 0;
}
1278
1279int callchain_branch_counts(struct callchain_root *root,
1280 u64 *branch_count, u64 *predicted_count,
1281 u64 *abort_count, u64 *cycles_count)
1282{
1283 if (branch_count)
1284 *branch_count = 0;
1285
1286 if (predicted_count)
1287 *predicted_count = 0;
1288
1289 if (abort_count)
1290 *abort_count = 0;
1291
1292 if (cycles_count)
1293 *cycles_count = 0;
1294
1295 return callchain_node_branch_counts_cumul(&root->node,
1296 branch_count,
1297 predicted_count,
1298 abort_count,
1299 cycles_count);
1300}
1301
1302static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1303{
1304 int printed;
1305
1306 printed = scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1307
1308 return printed;
1309}
1310
/*
 * Append one "name:percent%" element to the counts string in @bf, unless
 * @value falls below @threshold (a threshold of 0.0 disables filtering).
 * @idx selects the " (" opener for the first element, a space otherwise.
 * Returns the number of characters written (0 when filtered out).
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	if (threshold != 0.0 && value < threshold)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", idx ? " " : " (", str, value);
}
1323
/*
 * Build the annotation string for the "to" end of a branch: the branch
 * type summary plus optional predicted/abort percentages.  @i counts the
 * elements emitted so far so the helpers know whether to open the " ("
 * bracket; a closing ")" is appended only if anything was printed.
 * Returns the number of characters written to @bf.
 */
static int branch_to_str(char *bf, int bfsize,
			 u64 branch_count, u64 predicted_count,
			 u64 abort_count,
			 struct branch_type_stat *brtype_stat)
{
	int printed, i = 0;

	printed = branch_type_str(brtype_stat, bf, bfsize);
	if (printed)
		i++;

	/* only worth showing when some branches were mispredicted */
	if (predicted_count < branch_count) {
		printed += count_float_printf(i++, "predicted",
				predicted_count * 100.0 / branch_count,
				bf + printed, bfsize - printed, 0.0);
	}

	if (abort_count) {
		printed += count_float_printf(i++, "abort",
				abort_count * 100.0 / branch_count,
				bf + printed, bfsize - printed, 0.1);
	}

	if (i)
		printed += scnprintf(bf + printed, bfsize - printed, ")");

	return printed;
}
1352
/*
 * Build the annotation string for the "from" end of a branch: average
 * cycles per branch and, when loop iteration data is available, the
 * iteration count and average cycles per iteration.
 * The caller guarantees @branch_count != 0 (see counts_str_build()).
 * Returns the number of characters written to @bf.
 */
static int branch_from_str(char *bf, int bfsize,
			   u64 branch_count,
			   u64 cycles_count, u64 iter_count,
			   u64 iter_cycles, u64 from_count)
{
	int printed = 0, i = 0;
	u64 cycles, v = 0;

	cycles = cycles_count / branch_count;
	if (cycles) {
		printed += count_pri64_printf(i++, "cycles",
				cycles,
				bf + printed, bfsize - printed);
	}

	if (iter_count && from_count) {
		/* iterations per "from" site */
		v = iter_count / from_count;
		if (v) {
			printed += count_pri64_printf(i++, "iter",
					v, bf + printed, bfsize - printed);

			printed += count_pri64_printf(i++, "avg_cycles",
					iter_cycles / iter_count,
					bf + printed, bfsize - printed);
		}
	}

	/* close the " (" bracket opened by the first element */
	if (i)
		printed += scnprintf(bf + printed, bfsize - printed, ")");

	return printed;
}
1385
/*
 * Build the per-entry branch annotation string into @bf.  Without any
 * branch samples the entry is plain calltrace; otherwise delegate to the
 * "to" or "from" formatter depending on which end of the branch this
 * callchain entry represents.  Returns the number of characters printed.
 */
static int counts_str_build(char *bf, int bfsize,
			     u64 branch_count, u64 predicted_count,
			     u64 abort_count, u64 cycles_count,
			     u64 iter_count, u64 iter_cycles,
			     u64 from_count,
			     struct branch_type_stat *brtype_stat)
{
	int printed;

	if (branch_count == 0)
		return scnprintf(bf, bfsize, " (calltrace)");

	if (brtype_stat->branch_to) {
		printed = branch_to_str(bf, bfsize, branch_count,
				predicted_count, abort_count, brtype_stat);
	} else {
		printed = branch_from_str(bf, bfsize, branch_count,
				cycles_count, iter_count, iter_cycles,
				from_count);
	}

	/* make sure callers see an empty string when nothing was emitted */
	if (!printed)
		bf[0] = 0;

	return printed;
}
1412
1413static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1414 u64 branch_count, u64 predicted_count,
1415 u64 abort_count, u64 cycles_count,
1416 u64 iter_count, u64 iter_cycles,
1417 u64 from_count,
1418 struct branch_type_stat *brtype_stat)
1419{
1420 char str[256];
1421
1422 counts_str_build(str, sizeof(str), branch_count,
1423 predicted_count, abort_count, cycles_count,
1424 iter_count, iter_cycles, from_count, brtype_stat);
1425
1426 if (fp)
1427 return fprintf(fp, "%s", str);
1428
1429 return scnprintf(bf, bfsize, "%s", str);
1430}
1431
1432int callchain_list_counts__printf_value(struct callchain_list *clist,
1433 FILE *fp, char *bf, int bfsize)
1434{
1435 u64 branch_count, predicted_count;
1436 u64 abort_count, cycles_count;
1437 u64 iter_count, iter_cycles;
1438 u64 from_count;
1439
1440 branch_count = clist->branch_count;
1441 predicted_count = clist->predicted_count;
1442 abort_count = clist->abort_count;
1443 cycles_count = clist->cycles_count;
1444 iter_count = clist->iter_count;
1445 iter_cycles = clist->iter_cycles;
1446 from_count = clist->from_count;
1447
1448 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1449 predicted_count, abort_count,
1450 cycles_count, iter_count, iter_cycles,
1451 from_count, &clist->brtype_stat);
1452}
1453
/*
 * Release everything owned by @node: the cached parent_val list, the own
 * val list (dropping the map references taken when the entries were
 * created) and, recursively, all children in the rb_root_in tree.
 * @node itself is NOT freed - it may be embedded in another structure.
 */
static void free_callchain_node(struct callchain_node *node)
{
	struct callchain_list *list, *tmp;
	struct callchain_node *child;
	struct rb_node *n;

	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	list_for_each_entry_safe(list, tmp, &node->val, list) {
		list_del_init(&list->list);
		map__zput(list->ms.map);
		free(list);
	}

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = container_of(n, struct callchain_node, rb_node_in);
		/* advance before the erase invalidates this rb_node */
		n = rb_next(n);
		rb_erase(&child->rb_node_in, &node->rb_root_in);

		free_callchain_node(child);
		free(child);
	}
}
1482
1483void free_callchain(struct callchain_root *root)
1484{
1485 if (!symbol_conf.use_callchain)
1486 return;
1487
1488 free_callchain_node(&root->node);
1489}
1490
1491static u64 decay_callchain_node(struct callchain_node *node)
1492{
1493 struct callchain_node *child;
1494 struct rb_node *n;
1495 u64 child_hits = 0;
1496
1497 n = rb_first(&node->rb_root_in);
1498 while (n) {
1499 child = container_of(n, struct callchain_node, rb_node_in);
1500
1501 child_hits += decay_callchain_node(child);
1502 n = rb_next(n);
1503 }
1504
1505 node->hit = (node->hit * 7) / 8;
1506 node->children_hit = child_hits;
1507
1508 return node->hit;
1509}
1510
1511void decay_callchain(struct callchain_root *root)
1512{
1513 if (!symbol_conf.use_callchain)
1514 return;
1515
1516 decay_callchain_node(&root->node);
1517}
1518
/*
 * Build @node->parent_val: a flat copy of every callchain_list entry of
 * all ancestors, ordered from the root down, so a node can be displayed
 * with its full path without walking parents again.  Each copied entry
 * takes its own map reference.  Returns 0 on success, -ENOMEM when an
 * allocation fails (in which case the partial copy is released).
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/* walk up the tree, collecting entries in reverse on a scratch list */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			map__get(new->ms.map);
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	/* reverse again so parent_val ends up in root-to-leaf order */
	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* the first copied entry folds in sibling information */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* allocation failed: drop whatever was copied so far */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del_init(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1558
1559int callchain_cursor__copy(struct callchain_cursor *dst,
1560 struct callchain_cursor *src)
1561{
1562 int rc = 0;
1563
1564 callchain_cursor_reset(dst);
1565 callchain_cursor_commit(src);
1566
1567 while (true) {
1568 struct callchain_cursor_node *node;
1569
1570 node = callchain_cursor_current(src);
1571 if (node == NULL)
1572 break;
1573
1574 rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
1575 node->branch, &node->branch_flags,
1576 node->nr_loop_iter,
1577 node->iter_cycles,
1578 node->branch_from, node->srcline);
1579 if (rc)
1580 break;
1581
1582 callchain_cursor_advance(src);
1583 }
1584
1585 return rc;
1586}
1587
1588/*
1589 * Initialize a cursor before adding entries inside, but keep
1590 * the previously allocated entries as a cache.
1591 */
1592void callchain_cursor_reset(struct callchain_cursor *cursor)
1593{
1594 struct callchain_cursor_node *node;
1595
1596 cursor->nr = 0;
1597 cursor->last = &cursor->first;
1598
1599 for (node = cursor->first; node != NULL; node = node->next)
1600 map__zput(node->map);
1601}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
4 *
5 * Handle the callchains from the stream in an ad-hoc radix tree and then
6 * sort them in an rbtree.
7 *
8 * Using a radix for code path provides a fast retrieval and factorizes
9 * memory use. Also that lets us use the paths in a hierarchical graph view.
10 *
11 */
12
13#include <inttypes.h>
14#include <stdlib.h>
15#include <stdio.h>
16#include <stdbool.h>
17#include <errno.h>
18#include <math.h>
19#include <linux/string.h>
20#include <linux/zalloc.h>
21
22#include "asm/bug.h"
23
24#include "debug.h"
25#include "dso.h"
26#include "event.h"
27#include "hist.h"
28#include "sort.h"
29#include "machine.h"
30#include "map.h"
31#include "callchain.h"
32#include "branch.h"
33#include "symbol.h"
34#include "util.h"
35#include "../perf.h"
36
/* Initializer fields shared by callchain_param and callchain_param_default. */
#define CALLCHAIN_PARAM_DEFAULT			\
	.mode		= CHAIN_GRAPH_ABS,	\
	.min_percent	= 0.5,			\
	.order		= ORDER_CALLEE,		\
	.key		= CCKEY_FUNCTION,	\
	.value		= CCVAL_PERCENT,	\

/* The live configuration, modified by the option/config parsers below. */
struct callchain_param callchain_param = {
	CALLCHAIN_PARAM_DEFAULT
};
47
48/*
49 * Are there any events usind DWARF callchains?
50 *
51 * I.e.
52 *
53 * -e cycles/call-graph=dwarf/
54 */
55bool dwarf_callchain_users;
56
/* Pristine defaults, kept so the original settings can be consulted/restored. */
struct callchain_param callchain_param_default = {
	CALLCHAIN_PARAM_DEFAULT
};

/* Per-thread cursor used while resolving the callchain of one sample. */
__thread struct callchain_cursor callchain_cursor;
62
/* Public wrapper around parse_callchain_record() for option callbacks. */
int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
{
	return parse_callchain_record(arg, param);
}
67
68static int parse_callchain_mode(const char *value)
69{
70 if (!strncmp(value, "graph", strlen(value))) {
71 callchain_param.mode = CHAIN_GRAPH_ABS;
72 return 0;
73 }
74 if (!strncmp(value, "flat", strlen(value))) {
75 callchain_param.mode = CHAIN_FLAT;
76 return 0;
77 }
78 if (!strncmp(value, "fractal", strlen(value))) {
79 callchain_param.mode = CHAIN_GRAPH_REL;
80 return 0;
81 }
82 if (!strncmp(value, "folded", strlen(value))) {
83 callchain_param.mode = CHAIN_FOLDED;
84 return 0;
85 }
86 return -1;
87}
88
89static int parse_callchain_order(const char *value)
90{
91 if (!strncmp(value, "caller", strlen(value))) {
92 callchain_param.order = ORDER_CALLER;
93 callchain_param.order_set = true;
94 return 0;
95 }
96 if (!strncmp(value, "callee", strlen(value))) {
97 callchain_param.order = ORDER_CALLEE;
98 callchain_param.order_set = true;
99 return 0;
100 }
101 return -1;
102}
103
/*
 * Select how callchain entries are keyed/aggregated from a user string
 * (prefix match).  "branch" is special: it enables branch callstacks
 * instead of changing the sort key.  Returns 0 on success, -1 otherwise.
 */
static int parse_callchain_sort_key(const char *value)
{
	if (!strncmp(value, "function", strlen(value))) {
		callchain_param.key = CCKEY_FUNCTION;
		return 0;
	}
	if (!strncmp(value, "address", strlen(value))) {
		callchain_param.key = CCKEY_ADDRESS;
		return 0;
	}
	if (!strncmp(value, "srcline", strlen(value))) {
		callchain_param.key = CCKEY_SRCLINE;
		return 0;
	}
	if (!strncmp(value, "branch", strlen(value))) {
		callchain_param.branch_callstack = 1;
		return 0;
	}
	return -1;
}
124
125static int parse_callchain_value(const char *value)
126{
127 if (!strncmp(value, "percent", strlen(value))) {
128 callchain_param.value = CCVAL_PERCENT;
129 return 0;
130 }
131 if (!strncmp(value, "period", strlen(value))) {
132 callchain_param.value = CCVAL_PERIOD;
133 return 0;
134 }
135 if (!strncmp(value, "count", strlen(value))) {
136 callchain_param.value = CCVAL_COUNT;
137 return 0;
138 }
139 return -1;
140}
141
142static int get_stack_size(const char *str, unsigned long *_size)
143{
144 char *endptr;
145 unsigned long size;
146 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
147
148 size = strtoul(str, &endptr, 0);
149
150 do {
151 if (*endptr)
152 break;
153
154 size = round_up(size, sizeof(u64));
155 if (!size || size > max_size)
156 break;
157
158 *_size = size;
159 return 0;
160
161 } while (0);
162
163 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
164 max_size, str);
165 return -1;
166}
167
/*
 * Parse the comma separated report-side callchain options in @arg:
 * mode/order/sort-key/value keywords, optional record options (when
 * @allow_record_opt, as used by perf top), a minimum percentage and a
 * print limit.  "none" disables callchains entirely.  As a side effect
 * the global callchain_param is updated and its sort routine registered.
 * Returns 0 on success, -1 on a parse or registration error.
 */
static int
__parse_callchain_report_opt(const char *arg, bool allow_record_opt)
{
	char *tok;
	char *endptr, *saveptr = NULL;
	bool minpcnt_set = false;
	bool record_opt_set = false;
	bool try_stack_size = false;

	callchain_param.enabled = true;
	symbol_conf.use_callchain = true;

	if (!arg)
		return 0;

	while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
		if (!strncmp(tok, "none", strlen(tok))) {
			callchain_param.mode = CHAIN_NONE;
			callchain_param.enabled = false;
			symbol_conf.use_callchain = false;
			return 0;
		}

		/* try each keyword class in turn; first match wins */
		if (!parse_callchain_mode(tok) ||
		    !parse_callchain_order(tok) ||
		    !parse_callchain_sort_key(tok) ||
		    !parse_callchain_value(tok)) {
			/* parsing ok - move on to the next */
			try_stack_size = false;
			goto next;
		} else if (allow_record_opt && !record_opt_set) {
			if (parse_callchain_record(tok, &callchain_param))
				goto try_numbers;

			/* assume that number followed by 'dwarf' is stack size */
			if (callchain_param.record_mode == CALLCHAIN_DWARF)
				try_stack_size = true;

			record_opt_set = true;
			goto next;
		}

try_numbers:
		if (try_stack_size) {
			unsigned long size = 0;

			if (get_stack_size(tok, &size) < 0)
				return -1;
			callchain_param.dump_size = size;
			try_stack_size = false;
		} else if (!minpcnt_set) {
			/* try to get the min percent */
			callchain_param.min_percent = strtod(tok, &endptr);
			if (tok == endptr)
				return -1;
			minpcnt_set = true;
		} else {
			/* try print limit at last */
			callchain_param.print_limit = strtoul(tok, &endptr, 0);
			if (tok == endptr)
				return -1;
		}
next:
		/* strtok_r() continues from saveptr on later iterations */
		arg = NULL;
	}

	if (callchain_register_param(&callchain_param) < 0) {
		pr_err("Can't register callchain params\n");
		return -1;
	}
	return 0;
}
240
/* perf report: record options are not accepted in the callchain string. */
int parse_callchain_report_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, false);
}
245
/* perf top: record options (fp/dwarf/lbr) may appear in the same string. */
int parse_callchain_top_opt(const char *arg)
{
	return __parse_callchain_report_opt(arg, true);
}
250
251int parse_callchain_record(const char *arg, struct callchain_param *param)
252{
253 char *tok, *name, *saveptr = NULL;
254 char *buf;
255 int ret = -1;
256
257 /* We need buffer that we know we can write to. */
258 buf = malloc(strlen(arg) + 1);
259 if (!buf)
260 return -ENOMEM;
261
262 strcpy(buf, arg);
263
264 tok = strtok_r((char *)buf, ",", &saveptr);
265 name = tok ? : (char *)buf;
266
267 do {
268 /* Framepointer style */
269 if (!strncmp(name, "fp", sizeof("fp"))) {
270 ret = 0;
271 param->record_mode = CALLCHAIN_FP;
272
273 tok = strtok_r(NULL, ",", &saveptr);
274 if (tok) {
275 unsigned long size;
276
277 size = strtoul(tok, &name, 0);
278 if (size < (unsigned) sysctl__max_stack())
279 param->max_stack = size;
280 }
281 break;
282
283 /* Dwarf style */
284 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
285 const unsigned long default_stack_dump_size = 8192;
286
287 ret = 0;
288 param->record_mode = CALLCHAIN_DWARF;
289 param->dump_size = default_stack_dump_size;
290 dwarf_callchain_users = true;
291
292 tok = strtok_r(NULL, ",", &saveptr);
293 if (tok) {
294 unsigned long size = 0;
295
296 ret = get_stack_size(tok, &size);
297 param->dump_size = size;
298 }
299 } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
300 if (!strtok_r(NULL, ",", &saveptr)) {
301 param->record_mode = CALLCHAIN_LBR;
302 ret = 0;
303 } else
304 pr_err("callchain: No more arguments "
305 "needed for --call-graph lbr\n");
306 break;
307 } else {
308 pr_err("callchain: Unknown --call-graph option "
309 "value: %s\n", arg);
310 break;
311 }
312
313 } while (0);
314
315 free(buf);
316 return ret;
317}
318
/*
 * perfconfig handler for the "call-graph.*" section: dispatch each known
 * key to the matching parser and update the global callchain_param.
 * Unknown sections return 0 so other handlers get a chance; unknown
 * call-graph keys fall through and also return 0.
 */
int perf_callchain_config(const char *var, const char *value)
{
	char *endptr;

	if (!strstarts(var, "call-graph."))
		return 0;
	/* skip the "call-graph." prefix */
	var += sizeof("call-graph.") - 1;

	if (!strcmp(var, "record-mode"))
		return parse_callchain_record_opt(value, &callchain_param);
	if (!strcmp(var, "dump-size")) {
		unsigned long size = 0;
		int ret;

		ret = get_stack_size(value, &size);
		callchain_param.dump_size = size;

		return ret;
	}
	if (!strcmp(var, "print-type")){
		int ret;
		ret = parse_callchain_mode(value);
		if (ret == -1)
			pr_err("Invalid callchain mode: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "order")){
		int ret;
		ret = parse_callchain_order(value);
		if (ret == -1)
			pr_err("Invalid callchain order: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "sort-key")){
		int ret;
		ret = parse_callchain_sort_key(value);
		if (ret == -1)
			pr_err("Invalid callchain sort key: %s\n", value);
		return ret;
	}
	if (!strcmp(var, "threshold")) {
		callchain_param.min_percent = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain threshold: %s\n", value);
			return -1;
		}
	}
	if (!strcmp(var, "print-limit")) {
		/* NOTE(review): parsed with strtod although the limit looks
		 * integral - confirm against callchain_param.print_limit's type */
		callchain_param.print_limit = strtod(value, &endptr);
		if (value == endptr) {
			pr_err("Invalid callchain print limit: %s\n", value);
			return -1;
		}
	}

	return 0;
}
376
/*
 * Insert @chain into the sorted output rbtree @root, ordered by
 * descending hits: self hits in flat/folded mode, cumulated hits in the
 * graph modes.  Higher values go left so an in-order walk yields the
 * hottest chains first.
 */
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}
417
/*
 * Depth-first walk of the input tree, inserting every node whose own
 * hits reach @min_hit into the flat output tree @rb_root.
 */
static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
		  u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_flat(rb_root, child, min_hit);
	}

	/* the artificial root has hit == 0 and is skipped here */
	if (node->hit && node->hit >= min_hit)
		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
}
436
437/*
438 * Once we get every callchains from the stream, we can now
439 * sort them by hit
440 */
441static void
442sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
443 u64 min_hit, struct callchain_param *param __maybe_unused)
444{
445 *rb_root = RB_ROOT;
446 __sort_chain_flat(rb_root, &root->node, min_hit);
447}
448
/*
 * Rebuild each node's per-level output tree (node->rb_root), keeping
 * only children whose cumulated hits reach the absolute @min_hit.
 */
static void __sort_chain_graph_abs(struct callchain_node *node,
				   u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	node->rb_root = RB_ROOT;
	n = rb_first(&node->rb_root_in);

	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_abs(child, min_hit);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_ABS);
	}
}
468
/* Graph mode with an absolute hit threshold (callchain_param.sort hook). */
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit, struct callchain_param *param __maybe_unused)
{
	__sort_chain_graph_abs(&chain_root->node, min_hit);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
476
/*
 * Like __sort_chain_graph_abs() but the threshold is relative: at each
 * level a child must account for at least @min_percent of its parent's
 * children_hit to be kept.
 */
static void __sort_chain_graph_rel(struct callchain_node *node,
				   double min_percent)
{
	struct rb_node *n;
	struct callchain_node *child;
	u64 min_hit;

	node->rb_root = RB_ROOT;
	/* translate the percentage into a hit count for this level */
	min_hit = ceil(node->children_hit * min_percent);

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_graph_rel(child, min_percent);
		if (callchain_cumul_hits(child) >= min_hit)
			rb_insert_callchain(&node->rb_root, child,
					    CHAIN_GRAPH_REL);
	}
}
498
/* Graph mode with a relative (percent) threshold (callchain_param.sort hook). */
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
		     u64 min_hit __maybe_unused, struct callchain_param *param)
{
	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
	rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
506
507int callchain_register_param(struct callchain_param *param)
508{
509 switch (param->mode) {
510 case CHAIN_GRAPH_ABS:
511 param->sort = sort_chain_graph_abs;
512 break;
513 case CHAIN_GRAPH_REL:
514 param->sort = sort_chain_graph_rel;
515 break;
516 case CHAIN_FLAT:
517 case CHAIN_FOLDED:
518 param->sort = sort_chain_flat;
519 break;
520 case CHAIN_NONE:
521 default:
522 return -1;
523 }
524 return 0;
525}
526
527/*
528 * Create a child for a parent. If inherit_children, then the new child
529 * will become the new parent of it's parent children
530 */
531static struct callchain_node *
532create_child(struct callchain_node *parent, bool inherit_children)
533{
534 struct callchain_node *new;
535
536 new = zalloc(sizeof(*new));
537 if (!new) {
538 perror("not enough memory to create child for code path tree");
539 return NULL;
540 }
541 new->parent = parent;
542 INIT_LIST_HEAD(&new->val);
543 INIT_LIST_HEAD(&new->parent_val);
544
545 if (inherit_children) {
546 struct rb_node *n;
547 struct callchain_node *child;
548
549 new->rb_root_in = parent->rb_root_in;
550 parent->rb_root_in = RB_ROOT;
551
552 n = rb_first(&new->rb_root_in);
553 while (n) {
554 child = rb_entry(n, struct callchain_node, rb_node_in);
555 child->parent = new;
556 n = rb_next(n);
557 }
558
559 /* make it the first child */
560 rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
561 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
562 }
563
564 return new;
565}
566
567
568/*
569 * Fill the node with callchain values
570 */
571static int
572fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
573{
574 struct callchain_cursor_node *cursor_node;
575
576 node->val_nr = cursor->nr - cursor->pos;
577 if (!node->val_nr)
578 pr_warning("Warning: empty node in callchain tree\n");
579
580 cursor_node = callchain_cursor_current(cursor);
581
582 while (cursor_node) {
583 struct callchain_list *call;
584
585 call = zalloc(sizeof(*call));
586 if (!call) {
587 perror("not enough memory for the code path tree");
588 return -1;
589 }
590 call->ip = cursor_node->ip;
591 call->ms = cursor_node->ms;
592 map__get(call->ms.map);
593 call->srcline = cursor_node->srcline;
594
595 if (cursor_node->branch) {
596 call->branch_count = 1;
597
598 if (cursor_node->branch_from) {
599 /*
600 * branch_from is set with value somewhere else
601 * to imply it's "to" of a branch.
602 */
603 call->brtype_stat.branch_to = true;
604
605 if (cursor_node->branch_flags.predicted)
606 call->predicted_count = 1;
607
608 if (cursor_node->branch_flags.abort)
609 call->abort_count = 1;
610
611 branch_type_count(&call->brtype_stat,
612 &cursor_node->branch_flags,
613 cursor_node->branch_from,
614 cursor_node->ip);
615 } else {
616 /*
617 * It's "from" of a branch
618 */
619 call->brtype_stat.branch_to = false;
620 call->cycles_count =
621 cursor_node->branch_flags.cycles;
622 call->iter_count = cursor_node->nr_loop_iter;
623 call->iter_cycles = cursor_node->iter_cycles;
624 }
625 }
626
627 list_add_tail(&call->list, &node->val);
628
629 callchain_cursor_advance(cursor);
630 cursor_node = callchain_cursor_current(cursor);
631 }
632 return 0;
633}
634
/*
 * Allocate a new leaf child of @parent and fill it with the remaining
 * cursor entries, crediting it with @period hits and one sample.
 * Returns the new node, or NULL on allocation failure (in which case
 * any partially filled entries are released).
 */
static struct callchain_node *
add_child(struct callchain_node *parent,
	  struct callchain_cursor *cursor,
	  u64 period)
{
	struct callchain_node *new;

	new = create_child(parent, false);
	if (new == NULL)
		return NULL;

	if (fill_node(new, cursor) < 0) {
		struct callchain_list *call, *tmp;

		/* undo the partial fill: drop entries and their map refs */
		list_for_each_entry_safe(call, tmp, &new->val, list) {
			list_del_init(&call->list);
			map__zput(call->ms.map);
			free(call);
		}
		free(new);
		return NULL;
	}

	new->children_hit = 0;
	new->hit = period;
	new->children_count = 0;
	new->count = 1;
	return new;
}
664
/* Three-way comparison result; MATCH_ERROR flags uncomparable inputs. */
enum match_result {
	MATCH_ERROR = -1,
	MATCH_EQ,
	MATCH_LT,
	MATCH_GT,
};

/*
 * Order two (possibly NULL) strings.  A NULL sorts before any real
 * string; two NULLs cannot be ordered and yield MATCH_ERROR.
 */
static enum match_result match_chain_strings(const char *left,
					     const char *right)
{
	int cmp;

	if (!left && !right)
		return MATCH_ERROR;
	if (!left)
		return MATCH_GT;
	if (!right)
		return MATCH_LT;

	cmp = strcmp(left, right);
	if (cmp < 0)
		return MATCH_LT;
	if (cmp > 0)
		return MATCH_GT;

	return MATCH_EQ;
}
692
693/*
694 * We need to always use relative addresses because we're aggregating
695 * callchains from multiple threads, i.e. different address spaces, so
696 * comparing absolute addresses make no sense as a symbol in a DSO may end up
697 * in a different address when used in a different binary or even the same
698 * binary but with some sort of address randomization technique, thus we need
699 * to compare just relative addresses. -acme
700 */
701static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
702 struct map *right_map, u64 right_ip)
703{
704 struct dso *left_dso = left_map ? left_map->dso : NULL;
705 struct dso *right_dso = right_map ? right_map->dso : NULL;
706
707 if (left_dso != right_dso)
708 return left_dso < right_dso ? MATCH_LT : MATCH_GT;
709
710 if (left_ip != right_ip)
711 return left_ip < right_ip ? MATCH_LT : MATCH_GT;
712
713 return MATCH_EQ;
714}
715
/*
 * Compare a cursor node (new sample data) with an existing callchain
 * entry according to callchain_param.key, falling back from srcline to
 * function to address comparison when the preferred key is unavailable.
 * On an exact match the entry's branch statistics are updated from the
 * sample as a side effect.
 */
static enum match_result match_chain(struct callchain_cursor_node *node,
				     struct callchain_list *cnode)
{
	enum match_result match = MATCH_ERROR;

	switch (callchain_param.key) {
	case CCKEY_SRCLINE:
		match = match_chain_strings(cnode->srcline, node->srcline);
		if (match != MATCH_ERROR)
			break;
		/* otherwise fall-back to symbol-based comparison below */
		__fallthrough;
	case CCKEY_FUNCTION:
		if (node->ms.sym && cnode->ms.sym) {
			/*
			 * Compare inlined frames based on their symbol name
			 * because different inlined frames will have the same
			 * symbol start. Otherwise do a faster comparison based
			 * on the symbol start address.
			 */
			if (cnode->ms.sym->inlined || node->ms.sym->inlined) {
				match = match_chain_strings(cnode->ms.sym->name,
							    node->ms.sym->name);
				if (match != MATCH_ERROR)
					break;
			} else {
				match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
								  node->ms.map, node->ms.sym->start);
				break;
			}
		}
		/* otherwise fall-back to IP-based comparison below */
		__fallthrough;
	case CCKEY_ADDRESS:
	default:
		match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->ms.map, node->ip);
		break;
	}

	/* on a hit, fold the sample's branch data into the existing entry */
	if (match == MATCH_EQ && node->branch) {
		cnode->branch_count++;

		if (node->branch_from) {
			/*
			 * It's "to" of a branch
			 */
			cnode->brtype_stat.branch_to = true;

			if (node->branch_flags.predicted)
				cnode->predicted_count++;

			if (node->branch_flags.abort)
				cnode->abort_count++;

			branch_type_count(&cnode->brtype_stat,
					  &node->branch_flags,
					  node->branch_from,
					  node->ip);
		} else {
			/*
			 * It's "from" of a branch
			 */
			cnode->brtype_stat.branch_to = false;
			cnode->cycles_count += node->branch_flags.cycles;
			cnode->iter_count += node->nr_loop_iter;
			cnode->iter_cycles += node->iter_cycles;
			cnode->from_count++;
		}
	}

	return match;
}
788
789/*
790 * Split the parent in two parts (a new child is created) and
791 * give a part of its callchain to the created child.
792 * Then create another child to host the given callchain of new branch
793 */
794static int
795split_add_child(struct callchain_node *parent,
796 struct callchain_cursor *cursor,
797 struct callchain_list *to_split,
798 u64 idx_parents, u64 idx_local, u64 period)
799{
800 struct callchain_node *new;
801 struct list_head *old_tail;
802 unsigned int idx_total = idx_parents + idx_local;
803
804 /* split */
805 new = create_child(parent, true);
806 if (new == NULL)
807 return -1;
808
809 /* split the callchain and move a part to the new child */
810 old_tail = parent->val.prev;
811 list_del_range(&to_split->list, old_tail);
812 new->val.next = &to_split->list;
813 new->val.prev = old_tail;
814 to_split->list.prev = &new->val;
815 old_tail->next = &new->val;
816
817 /* split the hits */
818 new->hit = parent->hit;
819 new->children_hit = parent->children_hit;
820 parent->children_hit = callchain_cumul_hits(new);
821 new->val_nr = parent->val_nr - idx_local;
822 parent->val_nr = idx_local;
823 new->count = parent->count;
824 new->children_count = parent->children_count;
825 parent->children_count = callchain_cumul_counts(new);
826
827 /* create a new child for the new branch if any */
828 if (idx_total < cursor->nr) {
829 struct callchain_node *first;
830 struct callchain_list *cnode;
831 struct callchain_cursor_node *node;
832 struct rb_node *p, **pp;
833
834 parent->hit = 0;
835 parent->children_hit += period;
836 parent->count = 0;
837 parent->children_count += 1;
838
839 node = callchain_cursor_current(cursor);
840 new = add_child(parent, cursor, period);
841 if (new == NULL)
842 return -1;
843
844 /*
845 * This is second child since we moved parent's children
846 * to new (first) child above.
847 */
848 p = parent->rb_root_in.rb_node;
849 first = rb_entry(p, struct callchain_node, rb_node_in);
850 cnode = list_first_entry(&first->val, struct callchain_list,
851 list);
852
853 if (match_chain(node, cnode) == MATCH_LT)
854 pp = &p->rb_left;
855 else
856 pp = &p->rb_right;
857
858 rb_link_node(&new->rb_node_in, p, pp);
859 rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
860 } else {
861 parent->hit = period;
862 parent->count = 1;
863 }
864 return 0;
865}
866
/* Forward declaration: append_chain() and append_chain_children() recurse
 * into each other while descending the radix tree. */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period);
871
/*
 * Try to append the cursor's remaining chain to one of @root's children,
 * using the rbtree ordering produced by match_chain() to find the slot.
 * When no child matches, a new one is created and linked in.  @root's
 * cumulated child hits/counts are bumped by @period on success.
 * Returns 0 on success, -1 on error (empty cursor or allocation failure).
 */
static int
append_chain_children(struct callchain_node *root,
		      struct callchain_cursor *cursor,
		      u64 period)
{
	struct callchain_node *rnode;
	struct callchain_cursor_node *node;
	struct rb_node **p = &root->rb_root_in.rb_node;
	struct rb_node *parent = NULL;

	node = callchain_cursor_current(cursor);
	if (!node)
		return -1;

	/* lookup in children */
	while (*p) {
		enum match_result ret;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node_in);

		/* If at least first entry matches, rely to children */
		ret = append_chain(rnode, cursor, period);
		if (ret == MATCH_EQ)
			goto inc_children_hit;
		if (ret == MATCH_ERROR)
			return -1;

		if (ret == MATCH_LT)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	/* nothing in children, add to the current node */
	rnode = add_child(root, cursor, period);
	if (rnode == NULL)
		return -1;

	rb_link_node(&rnode->rb_node_in, parent, p);
	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);

inc_children_hit:
	root->children_hit += period;
	root->children_count++;
	return 0;
}
918
/*
 * Try to match the cursor entries against @root's own value list and
 * account @period accordingly: split the node on a partial match,
 * increment the hit on a full match, or recurse into the children when
 * the chain continues past this node.  Returns MATCH_EQ when the chain
 * was consumed here, MATCH_LT/MATCH_GT to steer the caller's rbtree
 * descent, or MATCH_ERROR on failure.
 */
static enum match_result
append_chain(struct callchain_node *root,
	     struct callchain_cursor *cursor,
	     u64 period)
{
	struct callchain_list *cnode;
	u64 start = cursor->pos;
	bool found = false;
	u64 matches;
	enum match_result cmp = MATCH_ERROR;

	/*
	 * Lookup in the current node
	 * If we have a symbol, then compare the start to match
	 * anywhere inside a function, unless function
	 * mode is disabled.
	 */
	list_for_each_entry(cnode, &root->val, list) {
		struct callchain_cursor_node *node;

		node = callchain_cursor_current(cursor);
		if (!node)
			break;

		cmp = match_chain(node, cnode);
		if (cmp != MATCH_EQ)
			break;

		found = true;

		callchain_cursor_advance(cursor);
	}

	/* not even the first entry matched: relay the result to the parent */
	if (!found) {
		WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
		return cmp;
	}

	matches = cursor->pos - start;

	/* we match only a part of the node. Split it and add the new chain */
	if (matches < root->val_nr) {
		if (split_add_child(root, cursor, cnode, start, matches,
				    period) < 0)
			return MATCH_ERROR;

		return MATCH_EQ;
	}

	/* we match 100% of the path, increment the hit */
	if (matches == root->val_nr && cursor->pos == cursor->nr) {
		root->hit += period;
		root->count++;
		return MATCH_EQ;
	}

	/* We match the node and still have a part remaining */
	if (append_chain_children(root, cursor, period) < 0)
		return MATCH_ERROR;

	return MATCH_EQ;
}
982
983int callchain_append(struct callchain_root *root,
984 struct callchain_cursor *cursor,
985 u64 period)
986{
987 if (!cursor->nr)
988 return 0;
989
990 callchain_cursor_commit(cursor);
991
992 if (append_chain_children(&root->node, cursor, period) < 0)
993 return -1;
994
995 if (cursor->nr > root->max_depth)
996 root->max_depth = cursor->nr;
997
998 return 0;
999}
1000
1001static int
1002merge_chain_branch(struct callchain_cursor *cursor,
1003 struct callchain_node *dst, struct callchain_node *src)
1004{
1005 struct callchain_cursor_node **old_last = cursor->last;
1006 struct callchain_node *child;
1007 struct callchain_list *list, *next_list;
1008 struct rb_node *n;
1009 int old_pos = cursor->nr;
1010 int err = 0;
1011
1012 list_for_each_entry_safe(list, next_list, &src->val, list) {
1013 callchain_cursor_append(cursor, list->ip, &list->ms,
1014 false, NULL, 0, 0, 0, list->srcline);
1015 list_del_init(&list->list);
1016 map__zput(list->ms.map);
1017 free(list);
1018 }
1019
1020 if (src->hit) {
1021 callchain_cursor_commit(cursor);
1022 if (append_chain_children(dst, cursor, src->hit) < 0)
1023 return -1;
1024 }
1025
1026 n = rb_first(&src->rb_root_in);
1027 while (n) {
1028 child = container_of(n, struct callchain_node, rb_node_in);
1029 n = rb_next(n);
1030 rb_erase(&child->rb_node_in, &src->rb_root_in);
1031
1032 err = merge_chain_branch(cursor, dst, child);
1033 if (err)
1034 break;
1035
1036 free(child);
1037 }
1038
1039 cursor->nr = old_pos;
1040 cursor->last = old_last;
1041
1042 return err;
1043}
1044
1045int callchain_merge(struct callchain_cursor *cursor,
1046 struct callchain_root *dst, struct callchain_root *src)
1047{
1048 return merge_chain_branch(cursor, &dst->node, &src->node);
1049}
1050
/*
 * Append one entry to @cursor, reusing a cached node left over from a
 * previous callchain_cursor_reset() when available.  Takes a reference
 * on @ms->map and drops the reference held by a recycled node.
 * Returns 0 on success, -ENOMEM when a new node cannot be allocated.
 */
int callchain_cursor_append(struct callchain_cursor *cursor,
			    u64 ip, struct map_symbol *ms,
			    bool branch, struct branch_flags *flags,
			    int nr_loop_iter, u64 iter_cycles, u64 branch_from,
			    const char *srcline)
{
	struct callchain_cursor_node *node = *cursor->last;

	if (!node) {
		/* no cached node at this position: allocate a fresh one */
		node = calloc(1, sizeof(*node));
		if (!node)
			return -ENOMEM;

		*cursor->last = node;
	}

	node->ip = ip;
	/* drop the recycled node's map reference before overwriting it */
	map__zput(node->ms.map);
	node->ms = *ms;
	map__get(node->ms.map);
	node->branch = branch;
	node->nr_loop_iter = nr_loop_iter;
	node->iter_cycles = iter_cycles;
	node->srcline = srcline;

	if (flags)
		memcpy(&node->branch_flags, flags,
			sizeof(struct branch_flags));

	node->branch_from = branch_from;
	cursor->nr++;

	cursor->last = &node->next;

	return 0;
}
1087
1088int sample__resolve_callchain(struct perf_sample *sample,
1089 struct callchain_cursor *cursor, struct symbol **parent,
1090 struct evsel *evsel, struct addr_location *al,
1091 int max_stack)
1092{
1093 if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
1094 return 0;
1095
1096 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
1097 perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
1098 return thread__resolve_callchain(al->thread, cursor, evsel, sample,
1099 parent, al, max_stack);
1100 }
1101 return 0;
1102}
1103
1104int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
1105{
1106 if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
1107 !symbol_conf.show_branchflag_count)
1108 return 0;
1109 return callchain_append(he->callchain, &callchain_cursor, sample->period);
1110}
1111
/*
 * Fill @al (maps/map/symbol/srcline/addr plus cpumode and level letter)
 * from a resolved cursor node.  Returns 0 when the entry is unresolved
 * and @hide_unresolved requested skipping it, 1 otherwise.
 */
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved)
{
	al->maps = node->ms.maps;
	al->map = node->ms.map;
	al->sym = node->ms.sym;
	al->srcline = node->srcline;
	al->addr = node->ip;

	if (al->sym == NULL) {
		if (hide_unresolved)
			return 0;
		/* no map either: keep al's current cpumode/level */
		if (al->map == NULL)
			goto out;
	}

	/* classify kernel vs user and host vs guest for this address */
	if (al->maps == machine__kernel_maps(al->maps->machine)) {
		if (machine__is_host(al->maps->machine)) {
			al->cpumode = PERF_RECORD_MISC_KERNEL;
			al->level = 'k';
		} else {
			al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
			al->level = 'g';
		}
	} else {
		if (machine__is_host(al->maps->machine)) {
			al->cpumode = PERF_RECORD_MISC_USER;
			al->level = '.';
		} else if (perf_guest) {
			al->cpumode = PERF_RECORD_MISC_GUEST_USER;
			al->level = 'u';
		} else {
			al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
			al->level = 'H';
		}
	}

out:
	return 1;
}
1152
1153char *callchain_list__sym_name(struct callchain_list *cl,
1154 char *bf, size_t bfsize, bool show_dso)
1155{
1156 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1157 bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
1158 int printed;
1159
1160 if (cl->ms.sym) {
1161 const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1162
1163 if (show_srcline && cl->srcline)
1164 printed = scnprintf(bf, bfsize, "%s %s%s",
1165 cl->ms.sym->name, cl->srcline,
1166 inlined);
1167 else
1168 printed = scnprintf(bf, bfsize, "%s%s",
1169 cl->ms.sym->name, inlined);
1170 } else
1171 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1172
1173 if (show_dso)
1174 scnprintf(bf + printed, bfsize - printed, " %s",
1175 cl->ms.map ?
1176 cl->ms.map->dso->short_name :
1177 "unknown");
1178
1179 return bf;
1180}
1181
1182char *callchain_node__scnprintf_value(struct callchain_node *node,
1183 char *bf, size_t bfsize, u64 total)
1184{
1185 double percent = 0.0;
1186 u64 period = callchain_cumul_hits(node);
1187 unsigned count = callchain_cumul_counts(node);
1188
1189 if (callchain_param.mode == CHAIN_FOLDED) {
1190 period = node->hit;
1191 count = node->count;
1192 }
1193
1194 switch (callchain_param.value) {
1195 case CCVAL_PERIOD:
1196 scnprintf(bf, bfsize, "%"PRIu64, period);
1197 break;
1198 case CCVAL_COUNT:
1199 scnprintf(bf, bfsize, "%u", count);
1200 break;
1201 case CCVAL_PERCENT:
1202 default:
1203 if (total)
1204 percent = period * 100.0 / total;
1205 scnprintf(bf, bfsize, "%.2f%%", percent);
1206 break;
1207 }
1208 return bf;
1209}
1210
1211int callchain_node__fprintf_value(struct callchain_node *node,
1212 FILE *fp, u64 total)
1213{
1214 double percent = 0.0;
1215 u64 period = callchain_cumul_hits(node);
1216 unsigned count = callchain_cumul_counts(node);
1217
1218 if (callchain_param.mode == CHAIN_FOLDED) {
1219 period = node->hit;
1220 count = node->count;
1221 }
1222
1223 switch (callchain_param.value) {
1224 case CCVAL_PERIOD:
1225 return fprintf(fp, "%"PRIu64, period);
1226 case CCVAL_COUNT:
1227 return fprintf(fp, "%u", count);
1228 case CCVAL_PERCENT:
1229 default:
1230 if (total)
1231 percent = period * 100.0 / total;
1232 return percent_color_fprintf(fp, "%.2f%%", percent);
1233 }
1234 return 0;
1235}
1236
1237static void callchain_counts_value(struct callchain_node *node,
1238 u64 *branch_count, u64 *predicted_count,
1239 u64 *abort_count, u64 *cycles_count)
1240{
1241 struct callchain_list *clist;
1242
1243 list_for_each_entry(clist, &node->val, list) {
1244 if (branch_count)
1245 *branch_count += clist->branch_count;
1246
1247 if (predicted_count)
1248 *predicted_count += clist->predicted_count;
1249
1250 if (abort_count)
1251 *abort_count += clist->abort_count;
1252
1253 if (cycles_count)
1254 *cycles_count += clist->cycles_count;
1255 }
1256}
1257
1258static int callchain_node_branch_counts_cumul(struct callchain_node *node,
1259 u64 *branch_count,
1260 u64 *predicted_count,
1261 u64 *abort_count,
1262 u64 *cycles_count)
1263{
1264 struct callchain_node *child;
1265 struct rb_node *n;
1266
1267 n = rb_first(&node->rb_root_in);
1268 while (n) {
1269 child = rb_entry(n, struct callchain_node, rb_node_in);
1270 n = rb_next(n);
1271
1272 callchain_node_branch_counts_cumul(child, branch_count,
1273 predicted_count,
1274 abort_count,
1275 cycles_count);
1276
1277 callchain_counts_value(child, branch_count,
1278 predicted_count, abort_count,
1279 cycles_count);
1280 }
1281
1282 return 0;
1283}
1284
1285int callchain_branch_counts(struct callchain_root *root,
1286 u64 *branch_count, u64 *predicted_count,
1287 u64 *abort_count, u64 *cycles_count)
1288{
1289 if (branch_count)
1290 *branch_count = 0;
1291
1292 if (predicted_count)
1293 *predicted_count = 0;
1294
1295 if (abort_count)
1296 *abort_count = 0;
1297
1298 if (cycles_count)
1299 *cycles_count = 0;
1300
1301 return callchain_node_branch_counts_cumul(&root->node,
1302 branch_count,
1303 predicted_count,
1304 abort_count,
1305 cycles_count);
1306}
1307
1308static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
1309{
1310 return scnprintf(bf, bfsize, "%s%s:%" PRId64 "", (idx) ? " " : " (", str, value);
1311}
1312
/*
 * Print one "name:value%" percentage counter into @bf, unless @value
 * falls below a non-zero @threshold.  The first counter (@idx == 0)
 * opens the " (" group.  Returns the number of characters written.
 */
static int count_float_printf(int idx, const char *str, float value,
			      char *bf, int bfsize, float threshold)
{
	/* suppress values below the (non-zero) display threshold */
	if (value < threshold && threshold != 0.0)
		return 0;

	return scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
}
1321
1322static int branch_to_str(char *bf, int bfsize,
1323 u64 branch_count, u64 predicted_count,
1324 u64 abort_count,
1325 struct branch_type_stat *brtype_stat)
1326{
1327 int printed, i = 0;
1328
1329 printed = branch_type_str(brtype_stat, bf, bfsize);
1330 if (printed)
1331 i++;
1332
1333 if (predicted_count < branch_count) {
1334 printed += count_float_printf(i++, "predicted",
1335 predicted_count * 100.0 / branch_count,
1336 bf + printed, bfsize - printed, 0.0);
1337 }
1338
1339 if (abort_count) {
1340 printed += count_float_printf(i++, "abort",
1341 abort_count * 100.0 / branch_count,
1342 bf + printed, bfsize - printed, 0.1);
1343 }
1344
1345 if (i)
1346 printed += scnprintf(bf + printed, bfsize - printed, ")");
1347
1348 return printed;
1349}
1350
1351static int branch_from_str(char *bf, int bfsize,
1352 u64 branch_count,
1353 u64 cycles_count, u64 iter_count,
1354 u64 iter_cycles, u64 from_count)
1355{
1356 int printed = 0, i = 0;
1357 u64 cycles, v = 0;
1358
1359 cycles = cycles_count / branch_count;
1360 if (cycles) {
1361 printed += count_pri64_printf(i++, "cycles",
1362 cycles,
1363 bf + printed, bfsize - printed);
1364 }
1365
1366 if (iter_count && from_count) {
1367 v = iter_count / from_count;
1368 if (v) {
1369 printed += count_pri64_printf(i++, "iter",
1370 v, bf + printed, bfsize - printed);
1371
1372 printed += count_pri64_printf(i++, "avg_cycles",
1373 iter_cycles / iter_count,
1374 bf + printed, bfsize - printed);
1375 }
1376 }
1377
1378 if (i)
1379 printed += scnprintf(bf + printed, bfsize - printed, ")");
1380
1381 return printed;
1382}
1383
1384static int counts_str_build(char *bf, int bfsize,
1385 u64 branch_count, u64 predicted_count,
1386 u64 abort_count, u64 cycles_count,
1387 u64 iter_count, u64 iter_cycles,
1388 u64 from_count,
1389 struct branch_type_stat *brtype_stat)
1390{
1391 int printed;
1392
1393 if (branch_count == 0)
1394 return scnprintf(bf, bfsize, " (calltrace)");
1395
1396 if (brtype_stat->branch_to) {
1397 printed = branch_to_str(bf, bfsize, branch_count,
1398 predicted_count, abort_count, brtype_stat);
1399 } else {
1400 printed = branch_from_str(bf, bfsize, branch_count,
1401 cycles_count, iter_count, iter_cycles,
1402 from_count);
1403 }
1404
1405 if (!printed)
1406 bf[0] = 0;
1407
1408 return printed;
1409}
1410
1411static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1412 u64 branch_count, u64 predicted_count,
1413 u64 abort_count, u64 cycles_count,
1414 u64 iter_count, u64 iter_cycles,
1415 u64 from_count,
1416 struct branch_type_stat *brtype_stat)
1417{
1418 char str[256];
1419
1420 counts_str_build(str, sizeof(str), branch_count,
1421 predicted_count, abort_count, cycles_count,
1422 iter_count, iter_cycles, from_count, brtype_stat);
1423
1424 if (fp)
1425 return fprintf(fp, "%s", str);
1426
1427 return scnprintf(bf, bfsize, "%s", str);
1428}
1429
1430int callchain_list_counts__printf_value(struct callchain_list *clist,
1431 FILE *fp, char *bf, int bfsize)
1432{
1433 u64 branch_count, predicted_count;
1434 u64 abort_count, cycles_count;
1435 u64 iter_count, iter_cycles;
1436 u64 from_count;
1437
1438 branch_count = clist->branch_count;
1439 predicted_count = clist->predicted_count;
1440 abort_count = clist->abort_count;
1441 cycles_count = clist->cycles_count;
1442 iter_count = clist->iter_count;
1443 iter_cycles = clist->iter_cycles;
1444 from_count = clist->from_count;
1445
1446 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1447 predicted_count, abort_count,
1448 cycles_count, iter_count, iter_cycles,
1449 from_count, &clist->brtype_stat);
1450}
1451
1452static void free_callchain_node(struct callchain_node *node)
1453{
1454 struct callchain_list *list, *tmp;
1455 struct callchain_node *child;
1456 struct rb_node *n;
1457
1458 list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
1459 list_del_init(&list->list);
1460 map__zput(list->ms.map);
1461 free(list);
1462 }
1463
1464 list_for_each_entry_safe(list, tmp, &node->val, list) {
1465 list_del_init(&list->list);
1466 map__zput(list->ms.map);
1467 free(list);
1468 }
1469
1470 n = rb_first(&node->rb_root_in);
1471 while (n) {
1472 child = container_of(n, struct callchain_node, rb_node_in);
1473 n = rb_next(n);
1474 rb_erase(&child->rb_node_in, &node->rb_root_in);
1475
1476 free_callchain_node(child);
1477 free(child);
1478 }
1479}
1480
1481void free_callchain(struct callchain_root *root)
1482{
1483 if (!symbol_conf.use_callchain)
1484 return;
1485
1486 free_callchain_node(&root->node);
1487}
1488
1489static u64 decay_callchain_node(struct callchain_node *node)
1490{
1491 struct callchain_node *child;
1492 struct rb_node *n;
1493 u64 child_hits = 0;
1494
1495 n = rb_first(&node->rb_root_in);
1496 while (n) {
1497 child = container_of(n, struct callchain_node, rb_node_in);
1498
1499 child_hits += decay_callchain_node(child);
1500 n = rb_next(n);
1501 }
1502
1503 node->hit = (node->hit * 7) / 8;
1504 node->children_hit = child_hits;
1505
1506 return node->hit;
1507}
1508
1509void decay_callchain(struct callchain_root *root)
1510{
1511 if (!symbol_conf.use_callchain)
1512 return;
1513
1514 decay_callchain_node(&root->node);
1515}
1516
/*
 * Build @node->parent_val: a copy of every callchain_list entry on the
 * path from @node's parent up to the root, ordered root-first.  Each
 * copy takes its own map reference.  Returns 0 on success, -ENOMEM on
 * allocation failure (in which case the partial copy is released).
 */
int callchain_node__make_parent_list(struct callchain_node *node)
{
	struct callchain_node *parent = node->parent;
	struct callchain_list *chain, *new;
	LIST_HEAD(head);

	/* clone ancestor entries (deepest ancestor first) onto head */
	while (parent) {
		list_for_each_entry_reverse(chain, &parent->val, list) {
			new = malloc(sizeof(*new));
			if (new == NULL)
				goto out;
			*new = *chain;
			new->has_children = false;
			/* the copy owns its own map reference */
			map__get(new->ms.map);
			list_add_tail(&new->list, &head);
		}
		parent = parent->parent;
	}

	/* reverse the temporary list so parent_val runs root -> leaf */
	list_for_each_entry_safe_reverse(chain, new, &head, list)
		list_move_tail(&chain->list, &node->parent_val);

	if (!list_empty(&node->parent_val)) {
		/* only the topmost entry can report siblings as children */
		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);

		chain = list_first_entry(&node->val, struct callchain_list, list);
		chain->has_children = false;
	}
	return 0;

out:
	/* undo the partial copy: free clones and their map references */
	list_for_each_entry_safe(chain, new, &head, list) {
		list_del_init(&chain->list);
		map__zput(chain->ms.map);
		free(chain);
	}
	return -ENOMEM;
}
1556
1557int callchain_cursor__copy(struct callchain_cursor *dst,
1558 struct callchain_cursor *src)
1559{
1560 int rc = 0;
1561
1562 callchain_cursor_reset(dst);
1563 callchain_cursor_commit(src);
1564
1565 while (true) {
1566 struct callchain_cursor_node *node;
1567
1568 node = callchain_cursor_current(src);
1569 if (node == NULL)
1570 break;
1571
1572 rc = callchain_cursor_append(dst, node->ip, &node->ms,
1573 node->branch, &node->branch_flags,
1574 node->nr_loop_iter,
1575 node->iter_cycles,
1576 node->branch_from, node->srcline);
1577 if (rc)
1578 break;
1579
1580 callchain_cursor_advance(src);
1581 }
1582
1583 return rc;
1584}
1585
1586/*
1587 * Initialize a cursor before adding entries inside, but keep
1588 * the previously allocated entries as a cache.
1589 */
1590void callchain_cursor_reset(struct callchain_cursor *cursor)
1591{
1592 struct callchain_cursor_node *node;
1593
1594 cursor->nr = 0;
1595 cursor->last = &cursor->first;
1596
1597 for (node = cursor->first; node != NULL; node = node->next)
1598 map__zput(node->ms.map);
1599}
1600
1601void callchain_param_setup(u64 sample_type, const char *arch)
1602{
1603 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
1604 if ((sample_type & PERF_SAMPLE_REGS_USER) &&
1605 (sample_type & PERF_SAMPLE_STACK_USER)) {
1606 callchain_param.record_mode = CALLCHAIN_DWARF;
1607 dwarf_callchain_users = true;
1608 } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
1609 callchain_param.record_mode = CALLCHAIN_LBR;
1610 else
1611 callchain_param.record_mode = CALLCHAIN_FP;
1612 }
1613
1614 /*
1615 * It's necessary to use libunwind to reliably determine the caller of
1616 * a leaf function on aarch64, as otherwise we cannot know whether to
1617 * start from the LR or FP.
1618 *
1619 * Always starting from the LR can result in duplicate or entirely
1620 * erroneous entries. Always skipping the LR and starting from the FP
1621 * can result in missing entries.
1622 */
1623 if (callchain_param.record_mode == CALLCHAIN_FP && !strcmp(arch, "arm64"))
1624 dwarf_callchain_users = true;
1625}
1626
1627static bool chain_match(struct callchain_list *base_chain,
1628 struct callchain_list *pair_chain)
1629{
1630 enum match_result match;
1631
1632 match = match_chain_strings(base_chain->srcline,
1633 pair_chain->srcline);
1634 if (match != MATCH_ERROR)
1635 return match == MATCH_EQ;
1636
1637 match = match_chain_dso_addresses(base_chain->ms.map,
1638 base_chain->ip,
1639 pair_chain->ms.map,
1640 pair_chain->ip);
1641
1642 return match == MATCH_EQ;
1643}
1644
/*
 * Compare the value lists of two callchain nodes entry by entry.
 * Pairs where either side lacks a srcline are skipped.  Returns true
 * only when both lists matched over the same number of entries.
 */
bool callchain_cnode_matched(struct callchain_node *base_cnode,
			     struct callchain_node *pair_cnode)
{
	struct callchain_list *base_chain, *pair_chain;
	bool match = false;

	pair_chain = list_first_entry(&pair_cnode->val,
				      struct callchain_list,
				      list);

	list_for_each_entry(base_chain, &base_cnode->val, list) {
		/* pair list exhausted first: base is longer, no match */
		if (&pair_chain->list == &pair_cnode->val)
			return false;

		if (!base_chain->srcline || !pair_chain->srcline) {
			pair_chain = list_next_entry(pair_chain, list);
			continue;
		}

		match = chain_match(base_chain, pair_chain);
		if (!match)
			return false;

		pair_chain = list_next_entry(pair_chain, list);
	}

	/*
	 * Say chain1 is ABC, chain2 is ABCD, we consider they are
	 * not fully matched.
	 */
	if (pair_chain && (&pair_chain->list != &pair_cnode->val))
		return false;

	return match;
}
1680
1681static u64 count_callchain_hits(struct hist_entry *he)
1682{
1683 struct rb_root *root = &he->sorted_chain;
1684 struct rb_node *rb_node = rb_first(root);
1685 struct callchain_node *node;
1686 u64 chain_hits = 0;
1687
1688 while (rb_node) {
1689 node = rb_entry(rb_node, struct callchain_node, rb_node);
1690 chain_hits += node->hit;
1691 rb_node = rb_next(rb_node);
1692 }
1693
1694 return chain_hits;
1695}
1696
1697u64 callchain_total_hits(struct hists *hists)
1698{
1699 struct rb_node *next = rb_first_cached(&hists->entries);
1700 u64 chain_hits = 0;
1701
1702 while (next) {
1703 struct hist_entry *he = rb_entry(next, struct hist_entry,
1704 rb_node);
1705
1706 chain_hits += count_callchain_hits(he);
1707 next = rb_next(&he->rb_node);
1708 }
1709
1710 return chain_hits;
1711}
1712
1713s64 callchain_avg_cycles(struct callchain_node *cnode)
1714{
1715 struct callchain_list *chain;
1716 s64 cycles = 0;
1717
1718 list_for_each_entry(chain, &cnode->val, list) {
1719 if (chain->srcline && chain->branch_count)
1720 cycles += chain->cycles_count / chain->branch_count;
1721 }
1722
1723 return cycles;
1724}