Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9#include <errno.h>
10#include <inttypes.h>
11#include <libgen.h>
12#include <stdlib.h>
13#include "util.h" // hex_width()
14#include "ui/ui.h"
15#include "sort.h"
16#include "build-id.h"
17#include "color.h"
18#include "config.h"
19#include "dso.h"
20#include "env.h"
21#include "map.h"
22#include "maps.h"
23#include "symbol.h"
24#include "srcline.h"
25#include "units.h"
26#include "debug.h"
27#include "annotate.h"
28#include "annotate-data.h"
29#include "evsel.h"
30#include "evlist.h"
31#include "bpf-event.h"
32#include "bpf-utils.h"
33#include "block-range.h"
34#include "string2.h"
35#include "dwarf-regs.h"
36#include "util/event.h"
37#include "util/sharded_mutex.h"
38#include "arch/common.h"
39#include "namespaces.h"
40#include "thread.h"
41#include "hashmap.h"
42#include <regex.h>
43#include <linux/bitops.h>
44#include <linux/kernel.h>
45#include <linux/string.h>
46#include <linux/zalloc.h>
47#include <subcmd/parse-options.h>
48#include <subcmd/run-command.h>
49
50/* FIXME: For the HE_COLORSET */
51#include "ui/browser.h"
52
53/*
54 * FIXME: Using the same values as slang.h,
55 * but that header may not be available everywhere
56 */
57#define LARROW_CHAR ((unsigned char)',')
58#define RARROW_CHAR ((unsigned char)'+')
59#define DARROW_CHAR ((unsigned char)'.')
60#define UARROW_CHAR ((unsigned char)'-')
61
62#include <linux/ctype.h>
63
64/* global annotation options */
65struct annotation_options annotate_opts;
66
67static regex_t file_lineno;
68
69static struct ins_ops *ins__find(struct arch *arch, const char *name);
70static void ins__sort(struct arch *arch);
71static int disasm_line__parse(char *line, const char **namep, char **rawp);
72static int call__scnprintf(struct ins *ins, char *bf, size_t size,
73 struct ins_operands *ops, int max_ins_name);
74static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
75 struct ins_operands *ops, int max_ins_name);
76
/*
 * Per-architecture disassembly description: the instruction table used to
 * classify mnemonics, objdump output quirks, and optional callbacks for
 * lazily associating ops with instructions not in the static table.
 */
struct arch {
	const char *name;
	struct ins *instructions;	/* kept sorted by name for bsearch, see ins__sort() */
	size_t nr_instructions;
	size_t nr_instructions_allocated;
	struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool sorted_instructions;
	bool initialized;
	const char *insn_suffix;	/* e.g. x86 "bwlq" operand-size suffixes, see __ins__find() */
	void *priv;
	unsigned int model;
	unsigned int family;
	int (*init)(struct arch *arch, char *cpuid);
	bool (*ins_is_fused)(struct arch *arch, const char *ins1,
			     const char *ins2);
	struct {
		char comment_char;		/* starts a comment in objdump output */
		char skip_functions_char;	/* targets containing this are not functions */
		char register_char;		/* register prefix, e.g. '%' on x86 */
		char memory_ref_char;		/* memory operand opener, e.g. '(' on x86 */
	} objdump;
};
99
100static struct ins_ops call_ops;
101static struct ins_ops dec_ops;
102static struct ins_ops jump_ops;
103static struct ins_ops mov_ops;
104static struct ins_ops nop_ops;
105static struct ins_ops lock_ops;
106static struct ins_ops ret_ops;
107
108/* Data type collection debug statistics */
109struct annotated_data_stat ann_data_stat;
110LIST_HEAD(ann_insn_stat);
111
112/* Pseudo data types */
113struct annotated_data_type stackop_type = {
114 .self = {
115 .type_name = (char *)"(stack operation)",
116 .children = LIST_HEAD_INIT(stackop_type.self.children),
117 },
118};
119
120static int arch__grow_instructions(struct arch *arch)
121{
122 struct ins *new_instructions;
123 size_t new_nr_allocated;
124
125 if (arch->nr_instructions_allocated == 0 && arch->instructions)
126 goto grow_from_non_allocated_table;
127
128 new_nr_allocated = arch->nr_instructions_allocated + 128;
129 new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
130 if (new_instructions == NULL)
131 return -1;
132
133out_update_instructions:
134 arch->instructions = new_instructions;
135 arch->nr_instructions_allocated = new_nr_allocated;
136 return 0;
137
138grow_from_non_allocated_table:
139 new_nr_allocated = arch->nr_instructions + 128;
140 new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
141 if (new_instructions == NULL)
142 return -1;
143
144 memcpy(new_instructions, arch->instructions, arch->nr_instructions);
145 goto out_update_instructions;
146}
147
/*
 * Add a (name, ops) pair to arch's instruction table, growing it on demand.
 * The table is re-sorted after every insertion so that __ins__find()'s
 * bsearch keeps working.  Returns 0 on success, -1 on allocation failure.
 */
static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}
167
168#include "arch/arc/annotate/instructions.c"
169#include "arch/arm/annotate/instructions.c"
170#include "arch/arm64/annotate/instructions.c"
171#include "arch/csky/annotate/instructions.c"
172#include "arch/loongarch/annotate/instructions.c"
173#include "arch/mips/annotate/instructions.c"
174#include "arch/x86/annotate/instructions.c"
175#include "arch/powerpc/annotate/instructions.c"
176#include "arch/riscv64/annotate/instructions.c"
177#include "arch/s390/annotate/instructions.c"
178#include "arch/sparc/annotate/instructions.c"
179
180static struct arch architectures[] = {
181 {
182 .name = "arc",
183 .init = arc__annotate_init,
184 },
185 {
186 .name = "arm",
187 .init = arm__annotate_init,
188 },
189 {
190 .name = "arm64",
191 .init = arm64__annotate_init,
192 },
193 {
194 .name = "csky",
195 .init = csky__annotate_init,
196 },
197 {
198 .name = "mips",
199 .init = mips__annotate_init,
200 .objdump = {
201 .comment_char = '#',
202 },
203 },
204 {
205 .name = "x86",
206 .init = x86__annotate_init,
207 .instructions = x86__instructions,
208 .nr_instructions = ARRAY_SIZE(x86__instructions),
209 .insn_suffix = "bwlq",
210 .objdump = {
211 .comment_char = '#',
212 .register_char = '%',
213 .memory_ref_char = '(',
214 },
215 },
216 {
217 .name = "powerpc",
218 .init = powerpc__annotate_init,
219 },
220 {
221 .name = "riscv64",
222 .init = riscv64__annotate_init,
223 },
224 {
225 .name = "s390",
226 .init = s390__annotate_init,
227 .objdump = {
228 .comment_char = '#',
229 },
230 },
231 {
232 .name = "sparc",
233 .init = sparc__annotate_init,
234 .objdump = {
235 .comment_char = '#',
236 },
237 },
238 {
239 .name = "loongarch",
240 .init = loongarch__annotate_init,
241 .objdump = {
242 .comment_char = '#',
243 },
244 },
245};
246
247static void ins__delete(struct ins_operands *ops)
248{
249 if (ops == NULL)
250 return;
251 zfree(&ops->source.raw);
252 zfree(&ops->source.name);
253 zfree(&ops->target.raw);
254 zfree(&ops->target.name);
255}
256
/* Fallback formatter: mnemonic padded to max_ins_name, then the raw operands. */
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops, int max_ins_name)
{
	return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
262
263int ins__scnprintf(struct ins *ins, char *bf, size_t size,
264 struct ins_operands *ops, int max_ins_name)
265{
266 if (ins->ops->scnprintf)
267 return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
268
269 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
270}
271
272bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
273{
274 if (!arch || !arch->ins_is_fused)
275 return false;
276
277 return arch->ins_is_fused(arch, ins1, ins2);
278}
279
/*
 * Parse a disassembled call: "<addr> <function+offset>" or an indirect
 * form like "callq *0x8(%rbx)".  Fills ops->target with the address, the
 * function name (direct calls only) and, when the address round-trips
 * through the maps, the resolved symbol.
 * Returns 0 on success, -1 when the target cannot be parsed.
 */
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
	char *endptr, *tok, *name;
	struct map *map = ms->map;
	struct addr_map_symbol target = {
		.ms = { .map = map, },
	};

	/* objdump prints the target address first, in hex */
	ops->target.addr = strtoull(ops->raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;

	/* some arches mark non-function targets with a special char, skip those */
	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
		return -1;

	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	/* temporarily NUL-terminate so only the symbol name is duplicated */
	*tok = '\0';
	ops->target.name = strdup(name);
	*tok = '>';

	if (ops->target.name == NULL)
		return -1;
find_target:
	target.addr = map__objdump_2mem(map, ops->target.addr);

	/* accept the symbol only if the address conversion round-trips exactly */
	if (maps__find_ams(ms->maps, &target) == 0 &&
	    map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
		ops->target.sym = target.ms.sym;

	return 0;

indirect_call:
	tok = strchr(endptr, '*');
	if (tok != NULL) {
		/* NOTE(review): assumes '*' immediately follows endptr — confirm */
		endptr++;

		/* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
		 * Do not parse such instruction. */
		if (strstr(endptr, "(%r") == NULL)
			ops->target.addr = strtoull(endptr, NULL, 16);
	}
	goto find_target;
}
331
/*
 * Pretty-print a call target, preferring (in order): resolved symbol name,
 * raw form when no address was parsed, the textual name, and finally the
 * bare address for indirect calls.
 */
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops, int max_ins_name)
{
	if (ops->target.sym)
		return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);

	if (ops->target.addr == 0)
		return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);

	if (ops->target.name)
		return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);

	return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
346
347static struct ins_ops call_ops = {
348 .parse = call__parse,
349 .scnprintf = call__scnprintf,
350};
351
352bool ins__is_call(const struct ins *ins)
353{
354 return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops;
355}
356
357/*
358 * Prevents from matching commas in the comment section, e.g.:
359 * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
360 *
361 * and skip comma as part of function arguments, e.g.:
362 * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
363 */
364static inline const char *validate_comma(const char *c, struct ins_operands *ops)
365{
366 if (ops->jump.raw_comment && c > ops->jump.raw_comment)
367 return NULL;
368
369 if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
370 return NULL;
371
372 return c;
373}
374
/*
 * Parse a disassembled jump/branch.  Extracts the target address (skipping
 * up to two leading operands), records whether the target lies outside the
 * current function, and resolves the target symbol when possible.
 * Always returns 0.
 */
static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct addr_map_symbol target = {
		.ms = { .map = map, },
	};
	const char *c = strchr(ops->raw, ',');
	u64 start, end;

	/* remember where comments/function names begin so commas there are ignored */
	ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
	ops->jump.raw_func_start = strchr(ops->raw, '<');

	c = validate_comma(c, ops);

	/*
	 * Examples of lines to parse for the _cpp_lex_token@@Base
	 * function:
	 *
	 * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92>
	 * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72>
	 *
	 * The first is a jump to an offset inside the same function,
	 * the second is to another function, i.e. that 0xa72 is an
	 * offset in the cpp_named_operator2name@@base function.
	 */
	/*
	 * skip over possible up to 2 operands to get to address, e.g.:
	 * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
	 */
	if (c++ != NULL) {
		ops->target.addr = strtoull(c, NULL, 16);
		if (!ops->target.addr) {
			/* first operand wasn't the address, try after the next comma */
			c = strchr(c, ',');
			c = validate_comma(c, ops);
			if (c++ != NULL)
				ops->target.addr = strtoull(c, NULL, 16);
		}
	} else {
		/* no comma: the raw operand starts with the address */
		ops->target.addr = strtoull(ops->raw, NULL, 16);
	}

	target.addr = map__objdump_2mem(map, ops->target.addr);
	start = map__unmap_ip(map, sym->start);
	end = map__unmap_ip(map, sym->end);

	ops->target.outside = target.addr < start || target.addr > end;

	/*
	 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):

		cpp_named_operator2name@@Base+0xa72

	 * Point to a place that is after the cpp_named_operator2name
	 * boundaries, i.e. in the ELF symbol table for cc1
	 * cpp_named_operator2name is marked as being 32-bytes long, but it in
	 * fact is much larger than that, so we seem to need a symbols__find()
	 * routine that looks for >= current->start and < next_symbol->start,
	 * possibly just for C++ objects?
	 *
	 * For now lets just make some progress by marking jumps to outside the
	 * current function as call like.
	 *
	 * Actual navigation will come next, with further understanding of how
	 * the symbol searching and disassembly should be done.
	 */
	if (maps__find_ams(ms->maps, &target) == 0 &&
	    map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
		ops->target.sym = target.ms.sym;

	if (!ops->target.outside) {
		ops->target.offset = target.addr - start;
		ops->target.offset_avail = true;
	} else {
		ops->target.offset_avail = false;
	}

	return 0;
}
454
/*
 * Pretty-print a jump: keep any leading operands from the raw text and
 * replace the absolute target address with the function-relative offset,
 * or the target symbol name for jumps leaving the function.
 */
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops, int max_ins_name)
{
	const char *c;

	if (!ops->target.addr || ops->target.offset < 0)
		return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);

	if (ops->target.outside && ops->target.sym != NULL)
		return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);

	c = strchr(ops->raw, ',');
	c = validate_comma(c, ops);

	if (c != NULL) {
		const char *c2 = strchr(c + 1, ',');

		c2 = validate_comma(c2, ops);
		/* check for 3-op insn */
		if (c2 != NULL)
			c = c2;
		c++;

		/* mirror arch objdump's space-after-comma style */
		if (*c == ' ')
			c++;
	}

	/* print the raw prefix up to (excluding) the address, then the offset */
	return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
			 ins->name, c ? c - ops->raw : 0, ops->raw,
			 ops->target.offset);
}
487
/* No-op destructor: jump operands own no allocations of their own. */
static void jump__delete(struct ins_operands *ops __maybe_unused)
{
	/*
	 * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
	 * raw string, don't free them.
	 */
}
495
496static struct ins_ops jump_ops = {
497 .free = jump__delete,
498 .parse = jump__parse,
499 .scnprintf = jump__scnprintf,
500};
501
502bool ins__is_jump(const struct ins *ins)
503{
504 return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops;
505}
506
/*
 * For rip-relative operands, parse an objdump comment of the form
 * "<addr> <symbol>" into *addrp and a strdup'ed *namep.
 * NOTE(review): return values are inconsistent — -1 only for a missing '<',
 * 0 otherwise (including "nothing parsed"); callers currently ignore them.
 */
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
	char *endptr, *name, *t;

	/* only rip-relative references carry a resolvable comment */
	if (strstr(raw, "(%rip)") == NULL)
		return 0;

	*addrp = strtoull(comment, &endptr, 16);
	if (endptr == comment)
		return 0;
	name = strchr(endptr, '<');
	if (name == NULL)
		return -1;

	name++;

	t = strchr(name, '>');
	if (t == NULL)
		return 0;

	/* temporarily NUL-terminate to duplicate just the symbol name */
	*t = '\0';
	*namep = strdup(name);
	*t = '>';

	return 0;
}
533
/*
 * Parse the instruction following a "lock" prefix into ops->locked, so it
 * can be classified and pretty-printed on its own.  Deliberately returns 0
 * on every path: failure just leaves ops->locked.ops NULL and the raw text
 * is shown instead.
 */
static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
	if (ops->locked.ops == NULL)
		return 0;

	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
		goto out_free_ops;

	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);

	if (ops->locked.ins.ops == NULL)
		goto out_free_ops;

	/* recurse into the inner instruction's own parser, if any */
	if (ops->locked.ins.ops->parse &&
	    ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
		goto out_free_ops;

	return 0;

out_free_ops:
	zfree(&ops->locked.ops);
	return 0;
}
558
559static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
560 struct ins_operands *ops, int max_ins_name)
561{
562 int printed;
563
564 if (ops->locked.ins.ops == NULL)
565 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
566
567 printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
568 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
569 size - printed, ops->locked.ops, max_ins_name);
570}
571
/*
 * Free a lock-prefixed instruction's operands: the inner instruction's
 * operands first (via its own destructor when it has one), then the
 * container and target strings.
 */
static void lock__delete(struct ins_operands *ops)
{
	struct ins *ins = &ops->locked.ins;

	if (ins->ops && ins->ops->free)
		ins->ops->free(ops->locked.ops);
	else
		ins__delete(ops->locked.ops);

	zfree(&ops->locked.ops);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}
585
586static struct ins_ops lock_ops = {
587 .free = lock__delete,
588 .parse = lock__parse,
589 .scnprintf = lock__scnprintf,
590};
591
592/*
593 * Check if the operand has more than one registers like x86 SIB addressing:
594 * 0x1234(%rax, %rbx, 8)
595 *
596 * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check
597 * the input string after 'memory_ref_char' if exists.
598 */
599static bool check_multi_regs(struct arch *arch, const char *op)
600{
601 int count = 0;
602
603 if (arch->objdump.register_char == 0)
604 return false;
605
606 if (arch->objdump.memory_ref_char) {
607 op = strchr(op, arch->objdump.memory_ref_char);
608 if (op == NULL)
609 return false;
610 }
611
612 while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
613 count++;
614 op++;
615 }
616
617 return count > 1;
618}
619
/*
 * Split a mov-style "source,target [# comment]" raw operand string into
 * ops->source and ops->target.  Works by temporarily punching NUL bytes
 * into ops->raw around each piece, strdup'ing it, then restoring the byte.
 * Returns 0 on success, -1 on parse/allocation failure.
 */
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
	char *s = strchr(ops->raw, ','), *target, *comment, prev;

	if (s == NULL)
		return -1;

	*s = '\0';

	/*
	 * x86 SIB addressing has something like 0x8(%rax, %rcx, 1)
	 * then it needs to have the closing parenthesis.
	 */
	if (strchr(ops->raw, '(')) {
		*s = ',';
		s = strchr(ops->raw, ')');
		if (s == NULL || s[1] != ',')
			return -1;
		*++s = '\0';
	}

	ops->source.raw = strdup(ops->raw);
	*s = ',';

	if (ops->source.raw == NULL)
		return -1;

	ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);

	target = skip_spaces(++s);
	comment = strchr(s, arch->objdump.comment_char);

	/* position s at the last char of the target operand */
	if (comment != NULL)
		s = comment - 1;
	else
		s = strchr(s, '\0') - 1;

	/* trim trailing whitespace between target and comment/end */
	while (s > target && isspace(s[0]))
		--s;
	s++;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		goto out_free_source;

	ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);

	if (comment == NULL)
		return 0;

	/* resolve "# <addr> <sym>" comments for rip-relative operands */
	comment = skip_spaces(comment);
	comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
	comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);

	return 0;

out_free_source:
	zfree(&ops->source.raw);
	return -1;
}
684
685static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
686 struct ins_operands *ops, int max_ins_name)
687{
688 return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
689 ops->source.name ?: ops->source.raw,
690 ops->target.name ?: ops->target.raw);
691}
692
693static struct ins_ops mov_ops = {
694 .parse = mov__parse,
695 .scnprintf = mov__scnprintf,
696};
697
698static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
699{
700 char *target, *comment, *s, prev;
701
702 target = s = ops->raw;
703
704 while (s[0] != '\0' && !isspace(s[0]))
705 ++s;
706 prev = *s;
707 *s = '\0';
708
709 ops->target.raw = strdup(target);
710 *s = prev;
711
712 if (ops->target.raw == NULL)
713 return -1;
714
715 comment = strchr(s, arch->objdump.comment_char);
716 if (comment == NULL)
717 return 0;
718
719 comment = skip_spaces(comment);
720 comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
721
722 return 0;
723}
724
725static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
726 struct ins_operands *ops, int max_ins_name)
727{
728 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
729 ops->target.name ?: ops->target.raw);
730}
731
732static struct ins_ops dec_ops = {
733 .parse = dec__parse,
734 .scnprintf = dec__scnprintf,
735};
736
/* All nop variants are rendered as a bare, padded "nop". */
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
			  struct ins_operands *ops __maybe_unused, int max_ins_name)
{
	return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
742
743static struct ins_ops nop_ops = {
744 .scnprintf = nop__scnprintf,
745};
746
747static struct ins_ops ret_ops = {
748 .scnprintf = ins__raw_scnprintf,
749};
750
751bool ins__is_ret(const struct ins *ins)
752{
753 return ins->ops == &ret_ops;
754}
755
756bool ins__is_lock(const struct ins *ins)
757{
758 return ins->ops == &lock_ops;
759}
760
761static int ins__key_cmp(const void *name, const void *insp)
762{
763 const struct ins *ins = insp;
764
765 return strcmp(name, ins->name);
766}
767
768static int ins__cmp(const void *a, const void *b)
769{
770 const struct ins *ia = a;
771 const struct ins *ib = b;
772
773 return strcmp(ia->name, ib->name);
774}
775
776static void ins__sort(struct arch *arch)
777{
778 const int nmemb = arch->nr_instructions;
779
780 qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
781}
782
/*
 * Look up the ops for a mnemonic in the arch's (lazily sorted) instruction
 * table.  If not found and the arch defines insn_suffix (e.g. x86 "bwlq"),
 * retry with the trailing size suffix stripped.  Returns NULL when unknown.
 */
static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
	struct ins *ins;
	const int nmemb = arch->nr_instructions;

	if (!arch->sorted_instructions) {
		ins__sort(arch);
		arch->sorted_instructions = true;
	}

	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	if (ins)
		return ins->ops;

	if (arch->insn_suffix) {
		char tmp[32];
		char suffix;
		size_t len = strlen(name);

		/* guard the fixed-size copy below */
		if (len == 0 || len >= sizeof(tmp))
			return NULL;

		suffix = name[len - 1];
		if (strchr(arch->insn_suffix, suffix) == NULL)
			return NULL;

		strcpy(tmp, name);
		tmp[len - 1] = '\0'; /* remove the suffix and check again */

		ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	}
	return ins ? ins->ops : NULL;
}
816
817static struct ins_ops *ins__find(struct arch *arch, const char *name)
818{
819 struct ins_ops *ops = __ins__find(arch, name);
820
821 if (!ops && arch->associate_instruction_ops)
822 ops = arch->associate_instruction_ops(arch, name);
823
824 return ops;
825}
826
827static int arch__key_cmp(const void *name, const void *archp)
828{
829 const struct arch *arch = archp;
830
831 return strcmp(name, arch->name);
832}
833
834static int arch__cmp(const void *a, const void *b)
835{
836 const struct arch *aa = a;
837 const struct arch *ab = b;
838
839 return strcmp(aa->name, ab->name);
840}
841
842static void arch__sort(void)
843{
844 const int nmemb = ARRAY_SIZE(architectures);
845
846 qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
847}
848
/*
 * Find the arch descriptor by name, sorting the table once on first use.
 * NOTE(review): not thread-safe around the one-shot sort — presumably only
 * called from a single thread; confirm against callers.
 */
static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
861
862bool arch__is(struct arch *arch, const char *name)
863{
864 return !strcmp(arch->name, name);
865}
866
867/* symbol histogram: key = offset << 16 | evsel->core.idx */
868static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
869{
870 return (key >> 16) + (key & 0xffff);
871}
872
/* Hashmap key equality: keys are plain integers, compare directly. */
static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}
877
878static struct annotated_source *annotated_source__new(void)
879{
880 struct annotated_source *src = zalloc(sizeof(*src));
881
882 if (src != NULL)
883 INIT_LIST_HEAD(&src->source);
884
885 return src;
886}
887
/*
 * Free an annotated_source: every per-offset sample entry stored as a
 * hashmap value, the hashmap itself, the histogram array, and the struct.
 */
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (src == NULL)
		return;

	if (src->samples) {
		/* values are zalloc'ed sym_hist_entry, owned by the map */
		hashmap__for_each_entry(src->samples, cur, bkt)
			zfree(&cur->pvalue);
		hashmap__free(src->samples);
	}
	zfree(&src->histograms);
	free(src);
}
904
905static int annotated_source__alloc_histograms(struct annotated_source *src,
906 int nr_hists)
907{
908 src->nr_histograms = nr_hists;
909 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
910
911 if (src->histograms == NULL)
912 return -1;
913
914 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
915 if (src->samples == NULL)
916 zfree(&src->histograms);
917
918 return src->histograms ? 0 : -1;
919}
920
/*
 * Reset all sample counts for a symbol: zero the per-event histograms,
 * drop the per-offset sample entries and clear any branch cycle history.
 * Takes the annotation lock for the duration.
 */
void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	annotation__lock(notes);
	if (notes->src != NULL) {
		/* assumes histograms/samples were allocated alongside src — TODO confirm */
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
		hashmap__clear(notes->src->samples);
	}
	if (notes->branch && notes->branch->cycles_hist) {
		memset(notes->branch->cycles_hist, 0,
		       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	annotation__unlock(notes);
}
937
/*
 * Account one observed basic block ending at @offset in the cycle
 * histogram @ch: update aggregate/min/max stats unconditionally, and keep
 * per-block (start,cycles,num) state only for the longest block seen,
 * bumping the reset counter whenever a longer one displaces it.
 */
static int __symbol__account_cycles(struct cyc_hist *ch,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (cycles > ch[offset].cycles_max)
		ch[offset].cycles_max = cycles;

	/* zero means "unset"; ignore zero-cycle samples once a min exists */
	if (ch[offset].cycles_min) {
		if (cycles && cycles < ch[offset].cycles_min)
			ch[offset].cycles_min = cycles;
	} else
		ch[offset].cycles_min = cycles;

	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		/* an earlier (longer) block wins; restart stats for it */
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			return 0;
	}

	if (ch[offset].num < NUM_SPARKS)
		ch[offset].cycles_spark[ch[offset].num] = cycles;

	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}
987
988static int __symbol__inc_addr_samples(struct map_symbol *ms,
989 struct annotated_source *src, int evidx, u64 addr,
990 struct perf_sample *sample)
991{
992 struct symbol *sym = ms->sym;
993 long hash_key;
994 u64 offset;
995 struct sym_hist *h;
996 struct sym_hist_entry *entry;
997
998 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
999
1000 if ((addr < sym->start || addr >= sym->end) &&
1001 (addr != sym->end || sym->start != sym->end)) {
1002 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
1003 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
1004 return -ERANGE;
1005 }
1006
1007 offset = addr - sym->start;
1008 h = annotated_source__histogram(src, evidx);
1009 if (h == NULL) {
1010 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
1011 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
1012 return -ENOMEM;
1013 }
1014
1015 hash_key = offset << 16 | evidx;
1016 if (!hashmap__find(src->samples, hash_key, &entry)) {
1017 entry = zalloc(sizeof(*entry));
1018 if (entry == NULL)
1019 return -ENOMEM;
1020
1021 if (hashmap__add(src->samples, hash_key, entry) < 0)
1022 return -ENOMEM;
1023 }
1024
1025 h->nr_samples++;
1026 h->period += sample->period;
1027 entry->nr_samples++;
1028 entry->period += sample->period;
1029
1030 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
1031 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
1032 sym->start, sym->name, addr, addr - sym->start, evidx,
1033 entry->nr_samples, entry->period);
1034 return 0;
1035}
1036
1037struct annotated_branch *annotation__get_branch(struct annotation *notes)
1038{
1039 if (notes == NULL)
1040 return NULL;
1041
1042 if (notes->branch == NULL)
1043 notes->branch = zalloc(sizeof(*notes->branch));
1044
1045 return notes->branch;
1046}
1047
1048static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
1049{
1050 struct annotation *notes = symbol__annotation(sym);
1051 struct annotated_branch *branch;
1052
1053 branch = annotation__get_branch(notes);
1054 if (branch == NULL)
1055 return NULL;
1056
1057 if (branch->cycles_hist == NULL) {
1058 const size_t size = symbol__size(sym);
1059
1060 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
1061 }
1062
1063 return branch->cycles_hist;
1064}
1065
/*
 * Return the symbol's annotated_source, lazily creating it and its
 * histograms for @nr_hists events.  Note the goto jumps into the body of
 * the second if: a freshly created src always gets histograms allocated.
 */
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
		notes->src = annotated_source__new();
		if (notes->src == NULL)
			return NULL;
		goto alloc_histograms;
	}

	if (notes->src->histograms == NULL) {
alloc_histograms:
		/* allocation failure is tolerated; callers check src->histograms */
		annotated_source__alloc_histograms(notes->src, nr_hists);
	}

	return notes->src;
}
1084
1085static int symbol__inc_addr_samples(struct map_symbol *ms,
1086 struct evsel *evsel, u64 addr,
1087 struct perf_sample *sample)
1088{
1089 struct symbol *sym = ms->sym;
1090 struct annotated_source *src;
1091
1092 if (sym == NULL)
1093 return 0;
1094 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
1095 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
1096}
1097
/*
 * Validate a basic block's [start, addr] range against the symbol's
 * boundaries, convert both to symbol-relative offsets and hand off to
 * __symbol__account_cycles().  A start at/after addr is dropped (treated
 * as "no start").  Returns 0, -ERANGE or -ENOMEM.
 */
static int symbol__account_cycles(u64 addr, u64 start,
				  struct symbol *sym, unsigned cycles)
{
	struct cyc_hist *cycles_hist;
	unsigned offset;

	if (sym == NULL)
		return 0;
	cycles_hist = symbol__cycles_hist(sym);
	if (cycles_hist == NULL)
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
	return __symbol__account_cycles(cycles_hist,
					start ? start - sym->start : 0,
					offset, cycles,
					!!start);
}
1124
/*
 * Account the cycles of a sampled basic block ending at @ams.  The block
 * start is only used when IPC is computable: both ends in the same
 * function, or the start coincides with the function entry.
 * Returns the result of symbol__account_cycles(), 0 when cycles == 0.
 */
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
		(start->ms.sym == ams->ms.sym ||
		 (ams->ms.sym &&
		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}
1158
1159static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
1160{
1161 unsigned n_insn = 0;
1162 u64 offset;
1163
1164 for (offset = start; offset <= end; offset++) {
1165 if (notes->src->offsets[offset])
1166 n_insn++;
1167 }
1168 return n_insn;
1169}
1170
1171static void annotated_branch__delete(struct annotated_branch *branch)
1172{
1173 if (branch) {
1174 zfree(&branch->cycles_hist);
1175 free(branch);
1176 }
1177}
1178
/*
 * Compute the IPC for the basic block [start, end] from its cycle history
 * and stamp it onto every instruction line in the block that does not have
 * one yet, accumulating coverage stats on the annotation's branch data.
 */
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
	unsigned n_insn;
	unsigned int cover_insn = 0;
	u64 offset;

	n_insn = annotation__count_insn(notes, start, end);
	if (n_insn && ch->num && ch->cycles) {
		struct annotated_branch *branch;
		/* IPC = instructions / average cycles of the block */
		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);

		/* Hide data when there are too many overlaps. */
		if (ch->reset >= 0x7fff)
			return;

		for (offset = start; offset <= end; offset++) {
			struct annotation_line *al = notes->src->offsets[offset];

			/* first writer wins: keep an already-computed IPC */
			if (al && al->cycles && al->cycles->ipc == 0.0) {
				al->cycles->ipc = ipc;
				cover_insn++;
			}
		}

		branch = annotation__get_branch(notes);
		if (cover_insn && branch) {
			branch->hit_cycles += ch->cycles;
			branch->hit_insn += n_insn * ch->num;
			branch->cover_insn += cover_insn;
		}
	}
}
1211
/*
 * Walk the per-offset cycles histogram backwards and attach per-line
 * cycle statistics (ipc/avg/max/min) to the annotation lines.
 *
 * Best effort: on allocation failure the partially attached al->cycles
 * data is torn down again and the function still returns 0 — IPC is
 * optional decoration, not required for annotation to proceed.
 */
static int annotation__compute_ipc(struct annotation *notes, size_t size)
{
	int err = 0;
	s64 offset;

	/* No branch cycle data was collected for this symbol. */
	if (!notes->branch || !notes->branch->cycles_hist)
		return 0;

	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
	notes->branch->hit_cycles = 0;
	notes->branch->hit_insn = 0;
	notes->branch->cover_insn = 0;

	annotation__lock(notes);
	for (offset = size - 1; offset >= 0; --offset) {
		struct cyc_hist *ch;

		ch = &notes->branch->cycles_hist[offset];
		if (ch && ch->cycles) {
			struct annotation_line *al;

			al = notes->src->offsets[offset];
			if (al && al->cycles == NULL) {
				al->cycles = zalloc(sizeof(*al->cycles));
				if (al->cycles == NULL) {
					/* err only gates the cleanup loop below. */
					err = ENOMEM;
					break;
				}
			}
			if (ch->have_start)
				annotation__count_and_fill(notes, ch->start, offset, ch);
			/* al->cycles is non-NULL here: allocation above succeeded. */
			if (al && ch->num_aggr) {
				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
				al->cycles->max = ch->cycles_max;
				al->cycles->min = ch->cycles_min;
			}
		}
	}

	if (err) {
		/* Undo: free al->cycles on the offsets already processed. */
		while (++offset < (s64)size) {
			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];

			if (ch && ch->cycles) {
				struct annotation_line *al = notes->src->offsets[offset];
				if (al)
					zfree(&al->cycles);
			}
		}
	}

	annotation__unlock(notes);
	/* Deliberately 0 even on ENOMEM; see function comment. */
	return 0;
}
1266
1267int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
1268 struct evsel *evsel)
1269{
1270 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
1271}
1272
1273int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
1274 struct evsel *evsel, u64 ip)
1275{
1276 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
1277}
1278
1279static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
1280{
1281 dl->ins.ops = ins__find(arch, dl->ins.name);
1282
1283 if (!dl->ins.ops)
1284 return;
1285
1286 if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
1287 dl->ins.ops = NULL;
1288}
1289
/*
 * Split an objdump instruction line into mnemonic and operands.
 *
 * @line is modified in place: the first whitespace-delimited token is
 * duplicated into *@namep (caller owns the copy) and *@rawp is left
 * pointing at the trimmed operand text inside @line.
 *
 * Returns 0 on success, -1 on an empty line or allocation failure.
 */
static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
	char tmp, *name = skip_spaces(line);

	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	/* Advance *rawp to the end of the mnemonic token. */
	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	/* Temporarily NUL-terminate the mnemonic so it can be duplicated. */
	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
		return -1;

	/* Restore the clobbered byte and trim the operand text. */
	(*rawp)[0] = tmp;
	*rawp = strim(*rawp);

	return 0;
}
1317
/*
 * State threaded through the disassembly front ends (objdump, BPF, ...)
 * into disasm_line__new(); per-line fields are refreshed for every line.
 */
struct annotate_args {
	struct arch *arch;			/* instruction-set specific ops */
	struct map_symbol ms;			/* map/symbol being annotated */
	struct evsel *evsel;			/* event (group leader) being annotated */
	struct annotation_options *options;
	s64 offset;				/* offset into the symbol, -1 for non-insn lines */
	char *line;				/* raw text of the current line */
	int line_nr;				/* source line number, if known */
	char *fileloc;				/* last "filename:line" marker seen */
};
1328
1329static void annotation_line__init(struct annotation_line *al,
1330 struct annotate_args *args,
1331 int nr)
1332{
1333 al->offset = args->offset;
1334 al->line = strdup(args->line);
1335 al->line_nr = args->line_nr;
1336 al->fileloc = args->fileloc;
1337 al->data_nr = nr;
1338}
1339
1340static void annotation_line__exit(struct annotation_line *al)
1341{
1342 zfree_srcline(&al->path);
1343 zfree(&al->line);
1344 zfree(&al->cycles);
1345}
1346
1347static size_t disasm_line_size(int nr)
1348{
1349 struct annotation_line *al;
1350
1351 return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
1352}
1353
1354/*
1355 * Allocating the disasm annotation line data with
1356 * following structure:
1357 *
1358 * -------------------------------------------
1359 * struct disasm_line | struct annotation_line
1360 * -------------------------------------------
1361 *
1362 * We have 'struct annotation_line' member as last member
1363 * of 'struct disasm_line' to have an easy access.
1364 */
1365static struct disasm_line *disasm_line__new(struct annotate_args *args)
1366{
1367 struct disasm_line *dl = NULL;
1368 int nr = 1;
1369
1370 if (evsel__is_group_event(args->evsel))
1371 nr = args->evsel->core.nr_members;
1372
1373 dl = zalloc(disasm_line_size(nr));
1374 if (!dl)
1375 return NULL;
1376
1377 annotation_line__init(&dl->al, args, nr);
1378 if (dl->al.line == NULL)
1379 goto out_delete;
1380
1381 if (args->offset != -1) {
1382 if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
1383 goto out_free_line;
1384
1385 disasm_line__init_ins(dl, args->arch, &args->ms);
1386 }
1387
1388 return dl;
1389
1390out_free_line:
1391 zfree(&dl->al.line);
1392out_delete:
1393 free(dl);
1394 return NULL;
1395}
1396
1397void disasm_line__free(struct disasm_line *dl)
1398{
1399 if (dl->ins.ops && dl->ins.ops->free)
1400 dl->ins.ops->free(&dl->ops);
1401 else
1402 ins__delete(&dl->ops);
1403 zfree(&dl->ins.name);
1404 annotation_line__exit(&dl->al);
1405 free(dl);
1406}
1407
1408int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
1409{
1410 if (raw || !dl->ins.ops)
1411 return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
1412
1413 return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
1414}
1415
1416void annotation__exit(struct annotation *notes)
1417{
1418 annotated_source__delete(notes->src);
1419 annotated_branch__delete(notes->branch);
1420}
1421
/* Lazily-created pool of mutexes used to lock struct annotation objects. */
static struct sharded_mutex *sharded_mutex;

static void annotation__init_sharded_mutex(void)
{
	/* As many mutexes as there are CPUs. */
	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
}
1429
static size_t annotation__hash(const struct annotation *notes)
{
	/* The object's address is hash enough to pick a mutex shard. */
	return (size_t)notes;
}
1434
/*
 * Return the shard mutex for @notes, creating the pool on first use.
 * Returns NULL if pool creation failed; callers then skip locking.
 */
static struct mutex *annotation__get_mutex(const struct annotation *notes)
{
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	/* Thread-safe one-time initialization of the pool. */
	pthread_once(&once, annotation__init_sharded_mutex);
	if (!sharded_mutex)
		return NULL;

	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
}
1445
1446void annotation__lock(struct annotation *notes)
1447 NO_THREAD_SAFETY_ANALYSIS
1448{
1449 struct mutex *mutex = annotation__get_mutex(notes);
1450
1451 if (mutex)
1452 mutex_lock(mutex);
1453}
1454
1455void annotation__unlock(struct annotation *notes)
1456 NO_THREAD_SAFETY_ANALYSIS
1457{
1458 struct mutex *mutex = annotation__get_mutex(notes);
1459
1460 if (mutex)
1461 mutex_unlock(mutex);
1462}
1463
1464bool annotation__trylock(struct annotation *notes)
1465{
1466 struct mutex *mutex = annotation__get_mutex(notes);
1467
1468 if (!mutex)
1469 return false;
1470
1471 return mutex_trylock(mutex);
1472}
1473
1474
/* Append @al to the tail of the annotated-source line list @head. */
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
	list_add_tail(&al->node, head);
}
1479
/*
 * Return the next line after @pos in @head that maps to an instruction
 * (offset >= 0), skipping source/comment lines; NULL when none remain.
 */
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
1489
1490static const char *annotate__address_color(struct block_range *br)
1491{
1492 double cov = block_range__coverage(br);
1493
1494 if (cov >= 0) {
1495 /* mark red for >75% coverage */
1496 if (cov > 0.75)
1497 return PERF_COLOR_RED;
1498
1499 /* mark dull for <1% coverage */
1500 if (cov < 0.01)
1501 return PERF_COLOR_NORMAL;
1502 }
1503
1504 return PERF_COLOR_MAGENTA;
1505}
1506
1507static const char *annotate__asm_color(struct block_range *br)
1508{
1509 double cov = block_range__coverage(br);
1510
1511 if (cov >= 0) {
1512 /* mark dull for <1% coverage */
1513 if (cov < 0.01)
1514 return PERF_COLOR_NORMAL;
1515 }
1516
1517 return PERF_COLOR_BLUE;
1518}
1519
/*
 * Print branch-coverage decorations after a disassembly line: the join
 * percentage when @addr is a branch target, and the leave/prediction
 * percentages when @addr is where a branch exits. A single "\t#" comment
 * marker is emitted before the first percentage (emit_comment latch).
 */
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		/* NOTE(review): assumes a branch block always follows a target
		 * block in the range list — block_range__next() returning NULL
		 * here would crash; confirm against block-range construction. */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
		}
	}
}
1571
1572static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
1573{
1574 s64 offset = dl->al.offset;
1575 const u64 addr = start + offset;
1576 struct block_range *br;
1577
1578 br = block_range__find(addr);
1579 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
1580 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
1581 annotate__branch_printf(br, addr);
1582 return 0;
1583}
1584
/*
 * Print one annotation line for stdout annotation output.
 *
 * Instruction lines (offset != -1) print per-event percentages (or
 * periods/samples), the disassembly and optionally the source path.
 * Lines below @min_pcnt return -1 (skipped); exceeding @max_lines
 * returns 1 (stop). @queue, when set, is a list of preceding context
 * lines printed recursively before this one.
 *
 * Returns 0 when the line was printed.
 */
static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
		       int percent_type)
{
	struct disasm_line *dl = container_of(al, struct disasm_line, al);
	/* Remembers the last printed source path to avoid repeating it. */
	static const char *prev_line;

	if (al->offset != -1) {
		double max_percent = 0.0;
		int i, nr_percent = 1;
		const char *color;
		struct annotation *notes = symbol__annotation(sym);

		/* Find the hottest event on this line for filtering/coloring. */
		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   percent_type);

			if (percent > max_percent)
				max_percent = percent;
		}

		if (al->data_nr > nr_percent)
			nr_percent = al->data_nr;

		if (max_percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		/* Flush queued context lines up to (but not including) this one. */
		if (queue != NULL) {
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == al)
					break;
				annotation_line__print(queue, sym, start, evsel, len,
						       0, 0, 1, NULL, addr_fmt_width,
						       percent_type);
			}
		}

		color = get_percent_color(max_percent);

		/* One column per event in the group. */
		for (i = 0; i < nr_percent; i++) {
			struct annotation_data *data = &al->data[i];
			double percent;

			percent = annotation_data__percent(data, percent_type);
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %11" PRIu64,
					      data->he.period);
			else if (symbol_conf.show_nr_samples)
				color_fprintf(stdout, color, " %7" PRIu64,
					      data->he.nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		disasm_line__print(dl, start, addr_fmt_width);

		/*
		 * Also color the filename and line if needed, with
		 * the same color than the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (al->path) {
			if (!prev_line || strcmp(prev_line, al->path)) {
				color_fprintf(stdout, color, " // %s", al->path);
				prev_line = al->path;
			}
		}

		printf("\n");
	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		/* Non-instruction (source/context) line. */
		int width = symbol_conf.show_total_period ? 12 : 8;

		/* Context lines are never queued themselves. */
		if (queue)
			return -1;

		if (evsel__is_group_event(evsel))
			width *= evsel->core.nr_members;

		if (!*al->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
	}

	return 0;
}
1684
1685/*
1686 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1687 * which looks like following
1688 *
1689 * 0000000000415500 <_init>:
1690 * 415500: sub $0x8,%rsp
1691 * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
1692 * 41550b: test %rax,%rax
1693 * 41550e: je 415515 <_init+0x15>
1694 * 415510: callq 416e70 <__gmon_start__@plt>
1695 * 415515: add $0x8,%rsp
1696 * 415519: retq
1697 *
1698 * it will be parsed and saved into struct disasm_line as
1699 * <offset> <name> <ops.raw>
1700 *
1701 * The offset will be a relative offset from the start of the symbol and -1
1702 * means that it's not a disassembly line so should be treated differently.
1703 * The ops.raw part will be parsed further according to type of the instruction.
1704 */
1705static int symbol__parse_objdump_line(struct symbol *sym,
1706 struct annotate_args *args,
1707 char *parsed_line, int *line_nr, char **fileloc)
1708{
1709 struct map *map = args->ms.map;
1710 struct annotation *notes = symbol__annotation(sym);
1711 struct disasm_line *dl;
1712 char *tmp;
1713 s64 line_ip, offset = -1;
1714 regmatch_t match[2];
1715
1716 /* /filename:linenr ? Save line number and ignore. */
1717 if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
1718 *line_nr = atoi(parsed_line + match[1].rm_so);
1719 free(*fileloc);
1720 *fileloc = strdup(parsed_line);
1721 return 0;
1722 }
1723
1724 /* Process hex address followed by ':'. */
1725 line_ip = strtoull(parsed_line, &tmp, 16);
1726 if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
1727 u64 start = map__rip_2objdump(map, sym->start),
1728 end = map__rip_2objdump(map, sym->end);
1729
1730 offset = line_ip - start;
1731 if ((u64)line_ip < start || (u64)line_ip >= end)
1732 offset = -1;
1733 else
1734 parsed_line = tmp + 1;
1735 }
1736
1737 args->offset = offset;
1738 args->line = parsed_line;
1739 args->line_nr = *line_nr;
1740 args->fileloc = *fileloc;
1741 args->ms.sym = sym;
1742
1743 dl = disasm_line__new(args);
1744 (*line_nr)++;
1745
1746 if (dl == NULL)
1747 return -1;
1748
1749 if (!disasm_line__has_local_offset(dl)) {
1750 dl->ops.target.offset = dl->ops.target.addr -
1751 map__rip_2objdump(map, sym->start);
1752 dl->ops.target.offset_avail = true;
1753 }
1754
1755 /* kcore has no symbols, so add the call target symbol */
1756 if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
1757 struct addr_map_symbol target = {
1758 .addr = dl->ops.target.addr,
1759 .ms = { .map = map, },
1760 };
1761
1762 if (!maps__find_ams(args->ms.maps, &target) &&
1763 target.ms.sym->start == target.al_addr)
1764 dl->ops.target.sym = target.ms.sym;
1765 }
1766
1767 annotation_line__add(&dl->al, ¬es->src->source);
1768 return 0;
1769}
1770
/*
 * Compile the "/filename:123" matcher used by symbol__parse_objdump_line()
 * before main() runs.
 * NOTE(review): the regcomp() return value is unchecked; the pattern is a
 * constant so failure is unexpected, but regexec() would then misbehave.
 */
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}
1775
1776static void delete_last_nop(struct symbol *sym)
1777{
1778 struct annotation *notes = symbol__annotation(sym);
1779 struct list_head *list = ¬es->src->source;
1780 struct disasm_line *dl;
1781
1782 while (!list_empty(list)) {
1783 dl = list_entry(list->prev, struct disasm_line, al.node);
1784
1785 if (dl->ins.ops) {
1786 if (dl->ins.ops != &nop_ops)
1787 return;
1788 } else {
1789 if (!strstr(dl->al.line, " nop ") &&
1790 !strstr(dl->al.line, " nopl ") &&
1791 !strstr(dl->al.line, " nopw "))
1792 return;
1793 }
1794
1795 list_del_init(&dl->al.node);
1796 disasm_line__free(dl);
1797 }
1798}
1799
/*
 * Format a human-readable message for a disassembly error into @buf.
 * Non-negative @errnum values are plain errno codes; negative values are
 * SYMBOL_ANNOTATE_ERRNO__* codes with dedicated messages.
 * Always returns 0.
 */
int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
{
	struct dso *dso = map__dso(ms->map);

	BUG_ON(buflen == 0);

	/* Plain errno: delegate to the thread-safe strerror. */
	if (errnum >= 0) {
		str_error_r(errnum, buf, buflen);
		return 0;
	}

	switch (errnum) {
	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
		/* 15 bytes of prefix + the build-id rendered right after it. */
		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
		char *build_id_msg = NULL;

		if (dso->has_build_id) {
			build_id__sprintf(&dso->bid, bf + 15);
			build_id_msg = bf;
		}
		scnprintf(buf, buflen,
			  "No vmlinux file%s\nwas found in the path.\n\n"
			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
			  "Please use:\n\n"
			  "  perf buildid-cache -vu vmlinux\n\n"
			  "or:\n\n"
			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
	}
		break;
	case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
		scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
		break;
	case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
		scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
		break;
	case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
		scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
		break;
	case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
		scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
		break;
	case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
		scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
			  dso->long_name);
		break;
	default:
		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
		break;
	}

	return 0;
}
1852
/*
 * Determine the file to hand to objdump for @dso, preferring the
 * build-id cache and falling back to the DSO's own path (possibly inside
 * the recorded namespace's chroot).
 *
 * Returns 0 with the path in @filename, ENOMEM on allocation failure, or
 * SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX when only kallsyms is available.
 */
static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
	char linkname[PATH_MAX];
	char *build_id_filename;
	char *build_id_path = NULL;
	char *pos;
	int len;

	/* Pure kallsyms (not kcore) has no bytes to disassemble. */
	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(dso))
		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;

	build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
	if (build_id_filename) {
		__symbol__join_symfs(filename, filename_size, build_id_filename);
		free(build_id_filename);
	} else {
		if (dso->has_build_id)
			return ENOMEM;
		goto fallback;
	}

	build_id_path = strdup(filename);
	if (!build_id_path)
		return ENOMEM;

	/*
	 * old style build-id cache has name of XX/XXXXXXX.. while
	 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
	 * extract the build-id part of dirname in the new style only.
	 */
	pos = strrchr(build_id_path, '/');
	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
		dirname(build_id_path);

	if (dso__is_kcore(dso))
		goto fallback;

	len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
	if (len < 0)
		goto fallback;

	linkname[len] = '\0';
	if (strstr(linkname, DSO__NAME_KALLSYMS) ||
		access(filename, R_OK)) {
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, lets hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		if (dso->kernel && dso->long_name[0] == '/')
			snprintf(filename, filename_size, "%s", dso->long_name);
		else
			__symbol__join_symfs(filename, filename_size, dso->long_name);

		/* Last resort: look up the path inside the recorded mount namespace. */
		mutex_lock(&dso->lock);
		if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) {
			char *new_name = dso__filename_with_chroot(dso, filename);
			if (new_name) {
				strlcpy(filename, new_name, filename_size);
				free(new_name);
			}
		}
		mutex_unlock(&dso->lock);
	}

	free(build_id_path);
	return 0;
}
1923
1924#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
1925#define PACKAGE "perf"
1926#include <bfd.h>
1927#include <dis-asm.h>
1928#include <bpf/bpf.h>
1929#include <bpf/btf.h>
1930#include <bpf/libbpf.h>
1931#include <linux/btf.h>
1932#include <tools/dis-asm-compat.h>
1933
/*
 * Disassemble a JITed BPF program with binutils' libopcodes, interleaving
 * source lines resolved through the program's BTF line info when
 * available. Output lines are appended to the symbol's annotation source.
 *
 * Returns 0 on success or a SYMBOL_ANNOTATE_ERRNO__* / errno code.
 */
static int symbol__disassemble_bpf(struct symbol *sym,
				   struct annotate_args *args)
{
	struct annotation *notes = symbol__annotation(sym);
	struct bpf_prog_linfo *prog_linfo = NULL;
	struct bpf_prog_info_node *info_node;
	int len = sym->end - sym->start;
	disassembler_ftype disassemble;
	struct map *map = args->ms.map;
	struct perf_bpil *info_linear;
	struct disassemble_info info;
	struct dso *dso = map__dso(map);
	int pc = 0, count, sub_id;
	struct btf *btf = NULL;
	char tpath[PATH_MAX];
	size_t buf_size;
	int nr_skip = 0;
	char *buf;
	bfd *bfdf;
	int ret;
	FILE *s;

	if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
		return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;

	pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
		  sym->name, sym->start, sym->end - sym->start);

	memset(tpath, 0, sizeof(tpath));
	perf_exe(tpath, sizeof(tpath));

	/* Open the perf binary itself just to learn the host arch/mach for bfd. */
	bfdf = bfd_openr(tpath, NULL);
	if (bfdf == NULL)
		abort();

	if (!bfd_check_format(bfdf, bfd_object))
		abort();

	/* libopcodes prints through a FILE*; capture it in a memory stream. */
	s = open_memstream(&buf, &buf_size);
	if (!s) {
		ret = errno;
		goto out;
	}
	init_disassemble_info_compat(&info, s,
				     (fprintf_ftype) fprintf,
				     fprintf_styled);
	info.arch = bfd_get_arch(bfdf);
	info.mach = bfd_get_mach(bfdf);

	/* The JITed image and metadata were saved in the perf env at record time. */
	info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
						 dso->bpf_prog.id);
	if (!info_node) {
		ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
		goto out;
	}
	info_linear = info_node->info_linear;
	sub_id = dso->bpf_prog.sub_id;

	info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
	info.buffer_length = info_linear->info.jited_prog_len;

	if (info_linear->info.nr_line_info)
		prog_linfo = bpf_prog_linfo__new(&info_linear->info);

	if (info_linear->info.btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(dso->bpf_prog.env,
					  info_linear->info.btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	disassemble_init_for_target(&info);

#ifdef DISASM_FOUR_ARGS_SIGNATURE
	disassemble = disassembler(info.arch,
				   bfd_big_endian(bfdf),
				   info.mach,
				   bfdf);
#else
	disassemble = disassembler(bfdf);
#endif
	if (disassemble == NULL)
		abort();

	fflush(s);
	/* One iteration per instruction; pc advances by the decoded length. */
	do {
		const struct bpf_line_info *linfo = NULL;
		struct disasm_line *dl;
		size_t prev_buf_size;
		const char *srcline;
		u64 addr;

		addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
		count = disassemble(pc, &info);

		if (prog_linfo)
			linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
								addr, sub_id,
								nr_skip);

		if (linfo && btf) {
			srcline = btf__name_by_offset(btf, linfo->line_off);
			nr_skip++;
		} else
			srcline = NULL;

		fprintf(s, "\n");
		prev_buf_size = buf_size;
		fflush(s);

		/* Emit the source line (offset -1) before its instruction. */
		if (!annotate_opts.hide_src_code && srcline) {
			args->offset = -1;
			args->line = strdup(srcline);
			args->line_nr = 0;
			args->fileloc = NULL;
			args->ms.sym  = sym;
			dl = disasm_line__new(args);
			if (dl) {
				annotation_line__add(&dl->al,
						     &notes->src->source);
			}
		}

		/* The instruction text is whatever the disassembler just appended. */
		args->offset = pc;
		args->line = buf + prev_buf_size;
		args->line_nr = 0;
		args->fileloc = NULL;
		args->ms.sym  = sym;
		dl = disasm_line__new(args);
		if (dl)
			annotation_line__add(&dl->al, &notes->src->source);

		pc += count;
	} while (count > 0 && pc < len);

	ret = 0;
out:
	free(prog_linfo);
	btf__free(btf);
	fclose(s);
	bfd_close(bfdf);
	return ret;
}
2080#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
/* Built without libbfd/libbpf: BPF disassembly is unavailable. */
static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
				   struct annotate_args *args __maybe_unused)
{
	return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
}
2086#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
2087
2088static int
2089symbol__disassemble_bpf_image(struct symbol *sym,
2090 struct annotate_args *args)
2091{
2092 struct annotation *notes = symbol__annotation(sym);
2093 struct disasm_line *dl;
2094
2095 args->offset = -1;
2096 args->line = strdup("to be implemented");
2097 args->line_nr = 0;
2098 args->fileloc = NULL;
2099 dl = disasm_line__new(args);
2100 if (dl)
2101 annotation_line__add(&dl->al, ¬es->src->source);
2102
2103 zfree(&args->line);
2104 return 0;
2105}
2106
2107/*
2108 * Possibly create a new version of line with tabs expanded. Returns the
2109 * existing or new line, storage is updated if a new line is allocated. If
2110 * allocation fails then NULL is returned.
2111 */
2112static char *expand_tabs(char *line, char **storage, size_t *storage_len)
2113{
2114 size_t i, src, dst, len, new_storage_len, num_tabs;
2115 char *new_line;
2116 size_t line_len = strlen(line);
2117
2118 for (num_tabs = 0, i = 0; i < line_len; i++)
2119 if (line[i] == '\t')
2120 num_tabs++;
2121
2122 if (num_tabs == 0)
2123 return line;
2124
2125 /*
2126 * Space for the line and '\0', less the leading and trailing
2127 * spaces. Each tab may introduce 7 additional spaces.
2128 */
2129 new_storage_len = line_len + 1 + (num_tabs * 7);
2130
2131 new_line = malloc(new_storage_len);
2132 if (new_line == NULL) {
2133 pr_err("Failure allocating memory for tab expansion\n");
2134 return NULL;
2135 }
2136
2137 /*
2138 * Copy regions starting at src and expand tabs. If there are two
2139 * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
2140 * are inserted.
2141 */
2142 for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
2143 if (line[i] == '\t') {
2144 len = i - src;
2145 memcpy(&new_line[dst], &line[src], len);
2146 dst += len;
2147 new_line[dst++] = ' ';
2148 while (dst % 8 != 0)
2149 new_line[dst++] = ' ';
2150 src = i + 1;
2151 num_tabs--;
2152 }
2153 }
2154
2155 /* Expand the last region. */
2156 len = line_len - src;
2157 memcpy(&new_line[dst], &line[src], len);
2158 dst += len;
2159 new_line[dst] = '\0';
2160
2161 free(*storage);
2162 *storage = new_line;
2163 *storage_len = new_storage_len;
2164 return new_line;
2165
2166}
2167
/*
 * Produce the disassembly for @sym and append it, line by line, to the
 * symbol's annotation source list.
 *
 * Dispatches to the BPF disassemblers for BPF DSOs; otherwise extracts
 * the relevant chunk of kcore or decompresses a kernel module when
 * needed, then runs objdump through /bin/sh and parses its stdout.
 *
 * Returns 0 on success, a negative value or errno-style code on failure.
 */
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
	struct annotation_options *opts = &annotate_opts;
	struct map *map = args->ms.map;
	struct dso *dso = map__dso(map);
	char *command;
	FILE *file;
	char symfs_filename[PATH_MAX];
	struct kcore_extract kce;
	bool delete_extract = false;
	bool decomp = false;
	int lineno = 0;
	char *fileloc = NULL;
	int nline;
	char *line;
	size_t line_len;
	/* objdump runs via sh -c so opts->objdump_path may contain arguments. */
	const char *objdump_argv[] = {
		"/bin/sh",
		"-c",
		NULL, /* Will be the objdump command to run. */
		"--",
		NULL, /* Will be the symfs path. */
		NULL,
	};
	struct child_process objdump_process;
	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));

	if (err)
		return err;

	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
		 symfs_filename, sym->name, map__unmap_ip(map, sym->start),
		 map__unmap_ip(map, sym->end));

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
		return symbol__disassemble_bpf(sym, args);
	} else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) {
		return symbol__disassemble_bpf_image(sym, args);
	} else if (dso__is_kcore(dso)) {
		/* Extract just this function from kcore into a temp file. */
		kce.kcore_filename = symfs_filename;
		kce.addr = map__rip_2objdump(map, sym->start);
		kce.offs = sym->start;
		kce.len = sym->end - sym->start;
		if (!kcore_extract__create(&kce)) {
			delete_extract = true;
			strlcpy(symfs_filename, kce.extract_filename,
				sizeof(symfs_filename));
		}
	} else if (dso__needs_decompress(dso)) {
		/* Compressed kernel module: decompress to a temp file first. */
		char tmp[KMOD_DECOMP_LEN];

		if (dso__decompress_kmodule_path(dso, symfs_filename,
						 tmp, sizeof(tmp)) < 0)
			return -1;

		decomp = true;
		strcpy(symfs_filename, tmp);
	}

	/* "$1" is the symfs path passed after "--" in objdump_argv. */
	err = asprintf(&command,
		 "%s %s%s --start-address=0x%016" PRIx64
		 " --stop-address=0x%016" PRIx64
		 " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
		 opts->objdump_path ?: "objdump",
		 opts->disassembler_style ? "-M " : "",
		 opts->disassembler_style ?: "",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 opts->show_linenr ? "-l" : "",
		 opts->show_asm_raw ? "" : "--no-show-raw-insn",
		 opts->annotate_src ? "-S" : "",
		 opts->prefix ? "--prefix " : "",
		 opts->prefix ? '"' : ' ',
		 opts->prefix ?: "",
		 opts->prefix ? '"' : ' ',
		 opts->prefix_strip ? "--prefix-strip=" : "",
		 opts->prefix_strip ?: "");

	if (err < 0) {
		pr_err("Failure allocating memory for the command to run\n");
		goto out_remove_tmp;
	}

	pr_debug("Executing: %s\n", command);

	objdump_argv[2] = command;
	objdump_argv[4] = symfs_filename;

	/* Create a pipe to read from for stdout */
	memset(&objdump_process, 0, sizeof(objdump_process));
	objdump_process.argv = objdump_argv;
	objdump_process.out = -1;
	objdump_process.err = -1;
	objdump_process.no_stderr = 1;
	if (start_command(&objdump_process)) {
		pr_err("Failure starting to run %s\n", command);
		err = -1;
		goto out_free_command;
	}

	file = fdopen(objdump_process.out, "r");
	if (!file) {
		pr_err("Failure creating FILE stream for %s\n", command);
		/*
		 * If we were using debug info should retry with
		 * original binary.
		 */
		err = -1;
		goto out_close_stdout;
	}

	/* Storage for getline. */
	line = NULL;
	line_len = 0;

	nline = 0;
	while (!feof(file)) {
		const char *match;
		char *expanded_line;

		if (getline(&line, &line_len, file) < 0 || !line)
			break;

		/* Skip lines containing "filename:" */
		match = strstr(line, symfs_filename);
		if (match && match[strlen(symfs_filename)] == ':')
			continue;

		expanded_line = strim(line);
		expanded_line = expand_tabs(expanded_line, &line, &line_len);
		if (!expanded_line)
			break;

		/*
		 * The source code line number (lineno) needs to be kept in
		 * across calls to symbol__parse_objdump_line(), so that it
		 * can associate it with the instructions till the next one.
		 * See disasm_line__new() and struct disasm_line::line_nr.
		 */
		if (symbol__parse_objdump_line(sym, args, expanded_line,
					       &lineno, &fileloc) < 0)
			break;
		nline++;
	}
	free(line);
	free(fileloc);

	err = finish_command(&objdump_process);
	if (err)
		pr_err("Error running %s\n", command);

	if (nline == 0) {
		err = -1;
		pr_err("No output from %s\n", command);
	}

	/*
	 * kallsyms does not have symbol sizes so there may a nop at the end.
	 * Remove it.
	 */
	if (dso__is_kcore(dso))
		delete_last_nop(sym);

	fclose(file);

out_close_stdout:
	close(objdump_process.out);

out_free_command:
	free(command);

out_remove_tmp:
	if (decomp)
		unlink(symfs_filename);

	if (delete_extract)
		kcore_extract__delete(&kce);

	return err;
}
2351
2352static void calc_percent(struct annotation *notes,
2353 struct evsel *evsel,
2354 struct annotation_data *data,
2355 s64 offset, s64 end)
2356{
2357 struct hists *hists = evsel__hists(evsel);
2358 int evidx = evsel->core.idx;
2359 struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
2360 unsigned int hits = 0;
2361 u64 period = 0;
2362
2363 while (offset < end) {
2364 struct sym_hist_entry *entry;
2365
2366 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2367 if (entry) {
2368 hits += entry->nr_samples;
2369 period += entry->period;
2370 }
2371 ++offset;
2372 }
2373
2374 if (sym_hist->nr_samples) {
2375 data->he.period = period;
2376 data->he.nr_samples = hits;
2377 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
2378 }
2379
2380 if (hists->stats.nr_non_filtered_samples)
2381 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
2382
2383 if (sym_hist->period)
2384 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
2385
2386 if (hists->stats.total_period)
2387 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
2388}
2389
2390static void annotation__calc_percent(struct annotation *notes,
2391 struct evsel *leader, s64 len)
2392{
2393 struct annotation_line *al, *next;
2394 struct evsel *evsel;
2395
2396 list_for_each_entry(al, ¬es->src->source, node) {
2397 s64 end;
2398 int i = 0;
2399
2400 if (al->offset == -1)
2401 continue;
2402
2403 next = annotation_line__next(al, ¬es->src->source);
2404 end = next ? next->offset : len;
2405
2406 for_each_group_evsel(evsel, leader) {
2407 struct annotation_data *data;
2408
2409 BUG_ON(i >= al->data_nr);
2410
2411 data = &al->data[i++];
2412
2413 calc_percent(notes, evsel, data, al->offset, end);
2414 }
2415 }
2416}
2417
/* Compute per-line sample percentages for @sym's annotation. */
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
{
	annotation__calc_percent(symbol__annotation(sym), evsel,
				 symbol__size(sym));
}
2424
2425static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
2426{
2427 struct perf_env *env = evsel__env(evsel);
2428 const char *arch_name = perf_env__arch(env);
2429 struct arch *arch;
2430 int err;
2431
2432 if (!arch_name)
2433 return errno;
2434
2435 *parch = arch = arch__find(arch_name);
2436 if (arch == NULL) {
2437 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
2438 return ENOTSUP;
2439 }
2440
2441 if (arch->init) {
2442 err = arch->init(arch, env ? env->cpuid : NULL);
2443 if (err) {
2444 pr_err("%s: failed to initialize %s arch priv area\n",
2445 __func__, arch->name);
2446 return err;
2447 }
2448 }
2449 return 0;
2450}
2451
2452int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
2453 struct arch **parch)
2454{
2455 struct symbol *sym = ms->sym;
2456 struct annotation *notes = symbol__annotation(sym);
2457 struct annotate_args args = {
2458 .evsel = evsel,
2459 .options = &annotate_opts,
2460 };
2461 struct arch *arch = NULL;
2462 int err;
2463
2464 err = evsel__get_arch(evsel, &arch);
2465 if (err < 0)
2466 return err;
2467
2468 if (parch)
2469 *parch = arch;
2470
2471 if (!list_empty(¬es->src->source))
2472 return 0;
2473
2474 args.arch = arch;
2475 args.ms = *ms;
2476 if (annotate_opts.full_addr)
2477 notes->start = map__objdump_2mem(ms->map, ms->sym->start);
2478 else
2479 notes->start = map__rip_2objdump(ms->map, ms->sym->start);
2480
2481 return symbol__disassemble(sym, &args);
2482}
2483
2484static void insert_source_line(struct rb_root *root, struct annotation_line *al)
2485{
2486 struct annotation_line *iter;
2487 struct rb_node **p = &root->rb_node;
2488 struct rb_node *parent = NULL;
2489 unsigned int percent_type = annotate_opts.percent_type;
2490 int i, ret;
2491
2492 while (*p != NULL) {
2493 parent = *p;
2494 iter = rb_entry(parent, struct annotation_line, rb_node);
2495
2496 ret = strcmp(iter->path, al->path);
2497 if (ret == 0) {
2498 for (i = 0; i < al->data_nr; i++) {
2499 iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
2500 percent_type);
2501 }
2502 return;
2503 }
2504
2505 if (ret < 0)
2506 p = &(*p)->rb_left;
2507 else
2508 p = &(*p)->rb_right;
2509 }
2510
2511 for (i = 0; i < al->data_nr; i++) {
2512 al->data[i].percent_sum = annotation_data__percent(&al->data[i],
2513 percent_type);
2514 }
2515
2516 rb_link_node(&al->rb_node, parent, p);
2517 rb_insert_color(&al->rb_node, root);
2518}
2519
2520static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
2521{
2522 int i;
2523
2524 for (i = 0; i < a->data_nr; i++) {
2525 if (a->data[i].percent_sum == b->data[i].percent_sum)
2526 continue;
2527 return a->data[i].percent_sum > b->data[i].percent_sum;
2528 }
2529
2530 return 0;
2531}
2532
2533static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
2534{
2535 struct annotation_line *iter;
2536 struct rb_node **p = &root->rb_node;
2537 struct rb_node *parent = NULL;
2538
2539 while (*p != NULL) {
2540 parent = *p;
2541 iter = rb_entry(parent, struct annotation_line, rb_node);
2542
2543 if (cmp_source_line(al, iter))
2544 p = &(*p)->rb_left;
2545 else
2546 p = &(*p)->rb_right;
2547 }
2548
2549 rb_link_node(&al->rb_node, parent, p);
2550 rb_insert_color(&al->rb_node, root);
2551}
2552
2553static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
2554{
2555 struct annotation_line *al;
2556 struct rb_node *node;
2557
2558 node = rb_first(src_root);
2559 while (node) {
2560 struct rb_node *next;
2561
2562 al = rb_entry(node, struct annotation_line, rb_node);
2563 next = rb_next(node);
2564 rb_erase(node, src_root);
2565
2566 __resort_source_line(dest_root, al);
2567 node = next;
2568 }
2569}
2570
2571static void print_summary(struct rb_root *root, const char *filename)
2572{
2573 struct annotation_line *al;
2574 struct rb_node *node;
2575
2576 printf("\nSorted summary for file %s\n", filename);
2577 printf("----------------------------------------------\n\n");
2578
2579 if (RB_EMPTY_ROOT(root)) {
2580 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
2581 return;
2582 }
2583
2584 node = rb_first(root);
2585 while (node) {
2586 double percent, percent_max = 0.0;
2587 const char *color;
2588 char *path;
2589 int i;
2590
2591 al = rb_entry(node, struct annotation_line, rb_node);
2592 for (i = 0; i < al->data_nr; i++) {
2593 percent = al->data[i].percent_sum;
2594 color = get_percent_color(percent);
2595 color_fprintf(stdout, color, " %7.2f", percent);
2596
2597 if (percent > percent_max)
2598 percent_max = percent;
2599 }
2600
2601 path = al->path;
2602 color = get_percent_color(percent_max);
2603 color_fprintf(stdout, color, " %s\n", path);
2604
2605 node = rb_next(node);
2606 }
2607}
2608
2609static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
2610{
2611 int evidx = evsel->core.idx;
2612 struct annotation *notes = symbol__annotation(sym);
2613 struct sym_hist *h = annotation__histogram(notes, evidx);
2614 u64 len = symbol__size(sym), offset;
2615
2616 for (offset = 0; offset < len; ++offset) {
2617 struct sym_hist_entry *entry;
2618
2619 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2620 if (entry && entry->nr_samples != 0)
2621 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
2622 sym->start + offset, entry->nr_samples);
2623 }
2624 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
2625}
2626
2627static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
2628{
2629 char bf[32];
2630 struct annotation_line *line;
2631
2632 list_for_each_entry_reverse(line, lines, node) {
2633 if (line->offset != -1)
2634 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
2635 }
2636
2637 return 0;
2638}
2639
2640int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
2641{
2642 struct map *map = ms->map;
2643 struct symbol *sym = ms->sym;
2644 struct dso *dso = map__dso(map);
2645 char *filename;
2646 const char *d_filename;
2647 const char *evsel_name = evsel__name(evsel);
2648 struct annotation *notes = symbol__annotation(sym);
2649 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
2650 struct annotation_line *pos, *queue = NULL;
2651 struct annotation_options *opts = &annotate_opts;
2652 u64 start = map__rip_2objdump(map, sym->start);
2653 int printed = 2, queue_len = 0, addr_fmt_width;
2654 int more = 0;
2655 bool context = opts->context;
2656 u64 len;
2657 int width = symbol_conf.show_total_period ? 12 : 8;
2658 int graph_dotted_len;
2659 char buf[512];
2660
2661 filename = strdup(dso->long_name);
2662 if (!filename)
2663 return -ENOMEM;
2664
2665 if (opts->full_path)
2666 d_filename = filename;
2667 else
2668 d_filename = basename(filename);
2669
2670 len = symbol__size(sym);
2671
2672 if (evsel__is_group_event(evsel)) {
2673 width *= evsel->core.nr_members;
2674 evsel__group_desc(evsel, buf, sizeof(buf));
2675 evsel_name = buf;
2676 }
2677
2678 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
2679 "percent: %s)\n",
2680 width, width, symbol_conf.show_total_period ? "Period" :
2681 symbol_conf.show_nr_samples ? "Samples" : "Percent",
2682 d_filename, evsel_name, h->nr_samples,
2683 percent_type_str(opts->percent_type));
2684
2685 printf("%-*.*s----\n",
2686 graph_dotted_len, graph_dotted_len, graph_dotted_line);
2687
2688 if (verbose > 0)
2689 symbol__annotate_hits(sym, evsel);
2690
2691 addr_fmt_width = annotated_source__addr_fmt_width(¬es->src->source, start);
2692
2693 list_for_each_entry(pos, ¬es->src->source, node) {
2694 int err;
2695
2696 if (context && queue == NULL) {
2697 queue = pos;
2698 queue_len = 0;
2699 }
2700
2701 err = annotation_line__print(pos, sym, start, evsel, len,
2702 opts->min_pcnt, printed, opts->max_lines,
2703 queue, addr_fmt_width, opts->percent_type);
2704
2705 switch (err) {
2706 case 0:
2707 ++printed;
2708 if (context) {
2709 printed += queue_len;
2710 queue = NULL;
2711 queue_len = 0;
2712 }
2713 break;
2714 case 1:
2715 /* filtered by max_lines */
2716 ++more;
2717 break;
2718 case -1:
2719 default:
2720 /*
2721 * Filtered by min_pcnt or non IP lines when
2722 * context != 0
2723 */
2724 if (!context)
2725 break;
2726 if (queue_len == context)
2727 queue = list_entry(queue->node.next, typeof(*queue), node);
2728 else
2729 ++queue_len;
2730 break;
2731 }
2732 }
2733
2734 free(filename);
2735
2736 return more;
2737}
2738
/* No-op callback: plain FILE output carries no percent-based coloring. */
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
{
}
2744
/* No-op callback: jump-count coloring only applies to the TUI browser. */
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
{
	return 0;
}
2750
/* No-op callback: color selection is meaningless for plain FILE output. */
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
	return 0;
}
2755
/* printf-style callback: forward the varargs to the FILE * hiding in @fp. */
static void FILE__printf(void *fp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(fp, fmt, ap);
	va_end(ap);
}
2764
/*
 * Render a control-flow marker as a UTF-8 arrow on a plain FILE stream.
 * The graph codes are the slang-style *ARROW_CHAR constants used by the
 * TUI browser; anything unknown prints as '?'.
 */
static void FILE__write_graph(void *fp, int graph)
{
	const char *s;
	switch (graph) {

	case DARROW_CHAR: s = "↓"; break; /* downward (forward) jump */
	case UARROW_CHAR: s = "↑"; break; /* upward (backward) jump */
	case LARROW_CHAR: s = "←"; break; /* return */
	case RARROW_CHAR: s = "→"; break; /* call */
	default: s = "?"; break;
	}

	fputs(s, fp);
}
2779
2780static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
2781{
2782 struct annotation *notes = symbol__annotation(sym);
2783 struct annotation_write_ops wops = {
2784 .first_line = true,
2785 .obj = fp,
2786 .set_color = FILE__set_color,
2787 .set_percent_color = FILE__set_percent_color,
2788 .set_jumps_percent_color = FILE__set_jumps_percent_color,
2789 .printf = FILE__printf,
2790 .write_graph = FILE__write_graph,
2791 };
2792 struct annotation_line *al;
2793
2794 list_for_each_entry(al, ¬es->src->source, node) {
2795 if (annotation_line__filter(al))
2796 continue;
2797 annotation_line__write(al, notes, &wops);
2798 fputc('\n', fp);
2799 wops.first_line = false;
2800 }
2801
2802 return 0;
2803}
2804
2805int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
2806{
2807 const char *ev_name = evsel__name(evsel);
2808 char buf[1024];
2809 char *filename;
2810 int err = -1;
2811 FILE *fp;
2812
2813 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
2814 return -1;
2815
2816 fp = fopen(filename, "w");
2817 if (fp == NULL)
2818 goto out_free_filename;
2819
2820 if (evsel__is_group_event(evsel)) {
2821 evsel__group_desc(evsel, buf, sizeof(buf));
2822 ev_name = buf;
2823 }
2824
2825 fprintf(fp, "%s() %s\nEvent: %s\n\n",
2826 ms->sym->name, map__dso(ms->map)->long_name, ev_name);
2827 symbol__annotate_fprintf2(ms->sym, fp);
2828
2829 fclose(fp);
2830 err = 0;
2831out_free_filename:
2832 free(filename);
2833 return err;
2834}
2835
/* Reset the sample histogram(s) of @sym before re-recording samples. */
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);

	/*
	 * NOTE(review): this clears nr_histograms * sizeof(sym_hist) bytes
	 * starting at histogram @evidx.  That zeroes *all* histograms only
	 * when evidx == 0; for evidx > 0 it would run past the histogram
	 * array.  Verify the intended semantics against callers (do they
	 * always pass 0 here?).
	 */
	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
}
2843
2844void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
2845{
2846 struct annotation *notes = symbol__annotation(sym);
2847 struct sym_hist *h = annotation__histogram(notes, evidx);
2848 int len = symbol__size(sym), offset;
2849
2850 h->nr_samples = 0;
2851 for (offset = 0; offset < len; ++offset) {
2852 struct sym_hist_entry *entry;
2853
2854 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2855 if (entry == NULL)
2856 continue;
2857
2858 entry->nr_samples = entry->nr_samples * 7 / 8;
2859 h->nr_samples += entry->nr_samples;
2860 }
2861}
2862
2863void annotated_source__purge(struct annotated_source *as)
2864{
2865 struct annotation_line *al, *n;
2866
2867 list_for_each_entry_safe(al, n, &as->source, node) {
2868 list_del_init(&al->node);
2869 disasm_line__free(disasm_line(al));
2870 }
2871}
2872
2873static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
2874{
2875 size_t printed;
2876
2877 if (dl->al.offset == -1)
2878 return fprintf(fp, "%s\n", dl->al.line);
2879
2880 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
2881
2882 if (dl->ops.raw[0] != '\0') {
2883 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
2884 dl->ops.raw);
2885 }
2886
2887 return printed + fprintf(fp, "\n");
2888}
2889
2890size_t disasm__fprintf(struct list_head *head, FILE *fp)
2891{
2892 struct disasm_line *pos;
2893 size_t printed = 0;
2894
2895 list_for_each_entry(pos, head, al.node)
2896 printed += disasm_line__fprintf(pos, fp);
2897
2898 return printed;
2899}
2900
2901bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
2902{
2903 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
2904 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
2905 dl->ops.target.offset >= (s64)symbol__size(sym))
2906 return false;
2907
2908 return true;
2909}
2910
/*
 * For every local jump in @sym, bump the jump_sources counter of the
 * target line and track the maximum in notes->max_jump_sources (used to
 * size the jump-count column).  Requires notes->src->offsets[] to be
 * populated (see annotation__set_offsets()).
 */
void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
	u64 offset, size = symbol__size(sym);

	/* PLT symbols contain external offsets */
	if (strstr(sym->name, "@plt"))
		return;

	for (offset = 0; offset < size; ++offset) {
		struct annotation_line *al = notes->src->offsets[offset];
		struct disasm_line *dl;

		dl = disasm_line(al);

		if (!disasm_line__is_valid_local_jump(dl, sym))
			continue;

		/* Replace al with the line at the jump destination. */
		al = notes->src->offsets[dl->ops.target.offset];

		/*
		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
		 * have to adjust to the previous offset?
		 */
		if (al == NULL)
			continue;

		if (++al->jump_sources > notes->max_jump_sources)
			notes->max_jump_sources = al->jump_sources;
	}
}
2941
/*
 * Walk the parsed source/asm lines, assigning display indices (idx for
 * all lines, idx_asm for asm-only view), recording the longest line and
 * filling the src->offsets[] offset -> line lookup table used by the
 * jump-target marking and the browser.  @size is the symbol size and
 * bounds the offsets table.
 */
void annotation__set_offsets(struct annotation *notes, s64 size)
{
	struct annotation_line *al;
	struct annotated_source *src = notes->src;

	src->max_line_len = 0;
	src->nr_entries = 0;
	src->nr_asm_entries = 0;

	list_for_each_entry(al, &src->source, node) {
		size_t line_len = strlen(al->line);

		if (src->max_line_len < line_len)
			src->max_line_len = line_len;
		al->idx = src->nr_entries++;
		if (al->offset != -1) {
			al->idx_asm = src->nr_asm_entries++;
			/*
			 * FIXME: short term bandaid to cope with assembly
			 * routines that comes with labels in the same column
			 * as the address in objdump, sigh.
			 *
			 * E.g. copy_user_generic_unrolled
			 */
			if (al->offset < size)
				notes->src->offsets[al->offset] = al;
		} else
			al->idx_asm = -1;
	}
}
2972
/* Column width used to render the jump-sources count @n. */
static inline int width_jumps(int n)
{
	return n >= 100 ? 5 : (n / 10 ? 2 : 1);
}
2981
2982static int annotation__max_ins_name(struct annotation *notes)
2983{
2984 int max_name = 0, len;
2985 struct annotation_line *al;
2986
2987 list_for_each_entry(al, ¬es->src->source, node) {
2988 if (al->offset == -1)
2989 continue;
2990
2991 len = strlen(disasm_line(al)->ins.name);
2992 if (max_name < len)
2993 max_name = len;
2994 }
2995
2996 return max_name;
2997}
2998
/*
 * Seed the address/target/jump column widths from the symbol size, its
 * end address and the jump-source counts; called once after disassembly
 * and jump-target marking.
 */
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
	notes->widths.addr = notes->widths.target =
		notes->widths.min_addr = hex_width(symbol__size(sym));
	notes->widths.max_addr = hex_width(sym->end);
	notes->widths.jumps = width_jumps(notes->max_jump_sources);
	notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
3007
3008void annotation__update_column_widths(struct annotation *notes)
3009{
3010 if (annotate_opts.use_offset)
3011 notes->widths.target = notes->widths.min_addr;
3012 else if (annotate_opts.full_addr)
3013 notes->widths.target = BITS_PER_LONG / 4;
3014 else
3015 notes->widths.target = notes->widths.max_addr;
3016
3017 notes->widths.addr = notes->widths.target;
3018
3019 if (annotate_opts.show_nr_jumps)
3020 notes->widths.addr += notes->widths.jumps + 1;
3021}
3022
3023void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
3024{
3025 annotate_opts.full_addr = !annotate_opts.full_addr;
3026
3027 if (annotate_opts.full_addr)
3028 notes->start = map__objdump_2mem(ms->map, ms->sym->start);
3029 else
3030 notes->start = map__rip_2objdump(ms->map, ms->sym->start);
3031
3032 annotation__update_column_widths(notes);
3033}
3034
3035static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
3036 struct rb_root *root)
3037{
3038 struct annotation_line *al;
3039 struct rb_root tmp_root = RB_ROOT;
3040
3041 list_for_each_entry(al, ¬es->src->source, node) {
3042 double percent_max = 0.0;
3043 u64 addr;
3044 int i;
3045
3046 for (i = 0; i < al->data_nr; i++) {
3047 double percent;
3048
3049 percent = annotation_data__percent(&al->data[i],
3050 annotate_opts.percent_type);
3051
3052 if (percent > percent_max)
3053 percent_max = percent;
3054 }
3055
3056 if (percent_max <= 0.5)
3057 continue;
3058
3059 addr = map__rip_2objdump(ms->map, ms->sym->start);
3060 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
3061 false, true, ms->sym->start + al->offset);
3062 insert_source_line(&tmp_root, al);
3063 }
3064
3065 resort_source_line(root, &tmp_root);
3066}
3067
3068static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
3069{
3070 struct annotation *notes = symbol__annotation(ms->sym);
3071
3072 annotation__calc_lines(notes, ms, root);
3073}
3074
/*
 * stdout annotation using the "2" pipeline (symbol__annotate2 +
 * symbol__annotate_fprintf2): disassemble, optionally print the
 * srcline summary, then dump the annotated lines.  Returns 0 or -1
 * after reporting the error to the UI.
 */
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
{
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	struct hists *hists = evsel__hists(evsel);
	char buf[1024];
	int err;

	err = symbol__annotate2(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		/* Remember the failure so we don't warn again for this dso. */
		dso->annotate_warned = true;
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso->long_name);
	}

	hists__scnprintf_title(hists, buf, sizeof(buf));
	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
		buf, percent_type_str(annotate_opts.percent_type), sym->name,
		dso->long_name);
	symbol__annotate_fprintf2(sym, stdout);

	/* Release the parsed lines; they are not needed after printing. */
	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
3110
/*
 * stdout annotation using the classic pipeline (symbol__annotate +
 * symbol__calc_percent + symbol__annotate_printf).  Returns 0 or -1
 * after reporting the error to the UI.
 */
int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
{
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	int err;

	err = symbol__annotate(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		/* Remember the failure so we don't warn again for this dso. */
		dso->annotate_warned = true;
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	symbol__calc_percent(sym, evsel);

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso->long_name);
	}

	symbol__annotate_printf(ms, evsel);

	/* Release the parsed lines; they are not needed after printing. */
	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
3142
3143bool ui__has_annotation(void)
3144{
3145 return use_browser == 1 && perf_hpp_list.sym;
3146}
3147
3148
3149static double annotation_line__max_percent(struct annotation_line *al,
3150 struct annotation *notes,
3151 unsigned int percent_type)
3152{
3153 double percent_max = 0.0;
3154 int i;
3155
3156 for (i = 0; i < notes->nr_events; i++) {
3157 double percent;
3158
3159 percent = annotation_data__percent(&al->data[i],
3160 percent_type);
3161
3162 if (percent > percent_max)
3163 percent_max = percent;
3164 }
3165
3166 return percent_max;
3167}
3168
/*
 * Emit the control-flow glyph for @dl (arrow for jump/call/ret, blanks
 * otherwise) via the obj callbacks, then format the instruction text
 * into @bf for the caller to print.
 */
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			       void *obj, char *bf, size_t size,
			       void (*obj__printf)(void *obj, const char *fmt, ...),
			       void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
			bool fwd;

			/* Jumps leaving the function render like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
call_like:
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}

	disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
}
3199
3200static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
3201{
3202 double ipc = 0.0, coverage = 0.0;
3203 struct annotated_branch *branch = annotation__get_branch(notes);
3204
3205 if (branch && branch->hit_cycles)
3206 ipc = branch->hit_insn / ((double)branch->hit_cycles);
3207
3208 if (branch && branch->total_insn) {
3209 coverage = branch->cover_insn * 100.0 /
3210 ((double)branch->total_insn);
3211 }
3212
3213 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
3214 ipc, coverage);
3215}
3216
/*
 * Core line renderer shared by the TUI browser and plain-FILE output.
 * Via the obj__* callbacks it emits, in order: the per-event percent /
 * samples / period columns, the optional IPC and cycles columns (when
 * branch-stack data exists), then the address (or jump-source count,
 * depending on options) and finally the disassembled/source text.
 *
 * @first_line selects the column-title variant for lines without data;
 * @current_entry and @change_color drive highlighting in the TUI case;
 * @width is the total rendering width in characters.
 */
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
				     bool first_line, bool current_entry, bool change_color, int width,
				     void *obj, unsigned int percent_type,
				     int (*obj__set_color)(void *obj, int color),
				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
				     int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
				     void (*obj__printf)(void *obj, const char *fmt, ...),
				     void (*obj__write_graph)(void *obj, int graph))

{
	double percent_max = annotation_line__max_percent(al, notes, percent_type);
	int pcnt_width = annotation__pcnt_width(notes),
	    cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;

	/* Decide whether this (data-less) first line shows column titles. */
	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	/* Percent / samples / period columns, one per event. */
	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		for (i = 0; i < notes->nr_events; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i], percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%6" PRIu64 " ",
						   al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%6.2f ", percent);
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
					   symbol_conf.show_total_period ? "Period" :
					   symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}

	/* IPC and cycles columns (branch-stack profiling only). */
	if (notes->branch) {
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		if (!annotate_opts.show_minmax_cycle) {
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					al->cycles->avg, al->cycles->min,
					al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		/* The title row on an empty line also carries the IPC summary. */
		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}

	obj__printf(obj, " ");

	/* Address / line-number column, then the line text itself. */
	if (!*al->line)
		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
	else if (al->offset == -1) {
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s  ", notes->widths.addr, " ");
		obj__printf(obj, bf);
		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
	} else {
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
						    notes->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);

		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
	}

}
3378
/*
 * Public entry point: render @al by unpacking the write_ops bundle into
 * the long parameter list of __annotation_line__write().
 */
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
			    struct annotation_write_ops *wops)
{
	__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
				 wops->change_color, wops->width, wops->obj,
				 annotate_opts.percent_type,
				 wops->set_color, wops->set_percent_color,
				 wops->set_jumps_percent_color, wops->printf,
				 wops->write_graph);
}
3389
3390int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
3391 struct arch **parch)
3392{
3393 struct symbol *sym = ms->sym;
3394 struct annotation *notes = symbol__annotation(sym);
3395 size_t size = symbol__size(sym);
3396 int nr_pcnt = 1, err;
3397
3398 notes->src->offsets = zalloc(size * sizeof(struct annotation_line *));
3399 if (notes->src->offsets == NULL)
3400 return ENOMEM;
3401
3402 if (evsel__is_group_event(evsel))
3403 nr_pcnt = evsel->core.nr_members;
3404
3405 err = symbol__annotate(ms, evsel, parch);
3406 if (err)
3407 goto out_free_offsets;
3408
3409 symbol__calc_percent(sym, evsel);
3410
3411 annotation__set_offsets(notes, size);
3412 annotation__mark_jump_targets(notes, sym);
3413
3414 err = annotation__compute_ipc(notes, size);
3415 if (err)
3416 goto out_free_offsets;
3417
3418 annotation__init_column_widths(notes, sym);
3419 notes->nr_events = nr_pcnt;
3420
3421 annotation__update_column_widths(notes);
3422 sym->annotate2 = 1;
3423
3424 return 0;
3425
3426out_free_offsets:
3427 zfree(¬es->src->offsets);
3428 return err;
3429}
3430
/*
 * perf_config() callback: parse one "annotate.*" variable from the perf
 * config file into @data (a struct annotation_options) or, for the
 * display-wide knobs, into symbol_conf.  Returns 0 (including for
 * unknown keys, which are just logged) or -1 on allocation failure.
 */
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		/* Clamp to the supported [MIN, MAX] offset-level range. */
		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
								value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}
3489
3490void annotation_options__init(void)
3491{
3492 struct annotation_options *opt = &annotate_opts;
3493
3494 memset(opt, 0, sizeof(*opt));
3495
3496 /* Default values. */
3497 opt->use_offset = true;
3498 opt->jump_arrows = true;
3499 opt->annotate_src = true;
3500 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
3501 opt->percent_type = PERCENT_PERIOD_LOCAL;
3502}
3503
3504void annotation_options__exit(void)
3505{
3506 zfree(&annotate_opts.disassembler_style);
3507 zfree(&annotate_opts.objdump_path);
3508}
3509
/* Load all annotate.* perf-config variables into annotate_opts. */
void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
}
3514
3515static unsigned int parse_percent_type(char *str1, char *str2)
3516{
3517 unsigned int type = (unsigned int) -1;
3518
3519 if (!strcmp("period", str1)) {
3520 if (!strcmp("local", str2))
3521 type = PERCENT_PERIOD_LOCAL;
3522 else if (!strcmp("global", str2))
3523 type = PERCENT_PERIOD_GLOBAL;
3524 }
3525
3526 if (!strcmp("hits", str1)) {
3527 if (!strcmp("local", str2))
3528 type = PERCENT_HITS_LOCAL;
3529 else if (!strcmp("global", str2))
3530 type = PERCENT_HITS_GLOBAL;
3531 }
3532
3533 return type;
3534}
3535
3536int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
3537 int unset __maybe_unused)
3538{
3539 unsigned int type;
3540 char *str1, *str2;
3541 int err = -1;
3542
3543 str1 = strdup(_str);
3544 if (!str1)
3545 return -ENOMEM;
3546
3547 str2 = strchr(str1, '-');
3548 if (!str2)
3549 goto out;
3550
3551 *str2++ = 0;
3552
3553 type = parse_percent_type(str1, str2);
3554 if (type == (unsigned int) -1)
3555 type = parse_percent_type(str2, str1);
3556 if (type != (unsigned int) -1) {
3557 annotate_opts.percent_type = type;
3558 err = 0;
3559 }
3560
3561out:
3562 free(str1);
3563 return err;
3564}
3565
3566int annotate_check_args(void)
3567{
3568 struct annotation_options *args = &annotate_opts;
3569
3570 if (args->prefix_strip && !args->prefix) {
3571 pr_err("--prefix-strip requires --prefix\n");
3572 return -1;
3573 }
3574 return 0;
3575}
3576
3577/*
3578 * Get register number and access offset from the given instruction.
3579 * It assumes AT&T x86 asm format like OFFSET(REG). Maybe it needs
3580 * to revisit the format when it handles different architecture.
3581 * Fills @reg and @offset when return 0.
3582 */
3583static int extract_reg_offset(struct arch *arch, const char *str,
3584 struct annotated_op_loc *op_loc)
3585{
3586 char *p;
3587 char *regname;
3588
3589 if (arch->objdump.register_char == 0)
3590 return -1;
3591
3592 /*
3593 * It should start from offset, but it's possible to skip 0
3594 * in the asm. So 0(%rax) should be same as (%rax).
3595 *
3596 * However, it also start with a segment select register like
3597 * %gs:0x18(%rbx). In that case it should skip the part.
3598 */
3599 if (*str == arch->objdump.register_char) {
3600 while (*str && !isdigit(*str) &&
3601 *str != arch->objdump.memory_ref_char)
3602 str++;
3603 }
3604
3605 op_loc->offset = strtol(str, &p, 0);
3606
3607 p = strchr(p, arch->objdump.register_char);
3608 if (p == NULL)
3609 return -1;
3610
3611 regname = strdup(p);
3612 if (regname == NULL)
3613 return -1;
3614
3615 op_loc->reg1 = get_dwarf_regnum(regname, 0);
3616 free(regname);
3617
3618 /* Get the second register */
3619 if (op_loc->multi_regs) {
3620 p = strchr(p + 1, arch->objdump.register_char);
3621 if (p == NULL)
3622 return -1;
3623
3624 regname = strdup(p);
3625 if (regname == NULL)
3626 return -1;
3627
3628 op_loc->reg2 = get_dwarf_regnum(regname, 0);
3629 free(regname);
3630 }
3631 return 0;
3632}
3633
3634/**
3635 * annotate_get_insn_location - Get location of instruction
3636 * @arch: the architecture info
3637 * @dl: the target instruction
3638 * @loc: a buffer to save the data
3639 *
3640 * Get detailed location info (register and offset) in the instruction.
3641 * It needs both source and target operand and whether it accesses a
3642 * memory location. The offset field is meaningful only when the
3643 * corresponding mem flag is set. The reg2 field is meaningful only
3644 * when multi_regs flag is set.
3645 *
3646 * Some examples on x86:
3647 *
3648 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
3649 * # dst_reg1 = rcx, dst_mem = 0
3650 *
3651 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
3652 * # dst_reg1 = r8, dst_mem = 0
3653 *
3654 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, dst_multi_regs = 0
3655 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
3656 * # dst_multi_regs = 1, dst_offset = 8
3657 */
3658int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
3659 struct annotated_insn_loc *loc)
3660{
3661 struct ins_operands *ops;
3662 struct annotated_op_loc *op_loc;
3663 int i;
3664
3665 if (!strcmp(dl->ins.name, "lock"))
3666 ops = dl->ops.locked.ops;
3667 else
3668 ops = &dl->ops;
3669
3670 if (ops == NULL)
3671 return -1;
3672
3673 memset(loc, 0, sizeof(*loc));
3674
3675 for_each_insn_op_loc(loc, i, op_loc) {
3676 const char *insn_str = ops->source.raw;
3677 bool multi_regs = ops->source.multi_regs;
3678
3679 if (i == INSN_OP_TARGET) {
3680 insn_str = ops->target.raw;
3681 multi_regs = ops->target.multi_regs;
3682 }
3683
3684 /* Invalidate the register by default */
3685 op_loc->reg1 = -1;
3686 op_loc->reg2 = -1;
3687
3688 if (insn_str == NULL)
3689 continue;
3690
3691 if (strchr(insn_str, arch->objdump.memory_ref_char)) {
3692 op_loc->mem_ref = true;
3693 op_loc->multi_regs = multi_regs;
3694 extract_reg_offset(arch, insn_str, op_loc);
3695 } else {
3696 char *s = strdup(insn_str);
3697
3698 if (s) {
3699 op_loc->reg1 = get_dwarf_regnum(s, 0);
3700 free(s);
3701 }
3702 }
3703 }
3704
3705 return 0;
3706}
3707
3708static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel)
3709{
3710 struct disasm_line *dl, *tmp_dl;
3711 struct annotation *notes;
3712
3713 notes = symbol__annotation(ms->sym);
3714 if (!list_empty(¬es->src->source))
3715 return;
3716
3717 if (symbol__annotate(ms, evsel, NULL) < 0)
3718 return;
3719
3720 /* remove non-insn disasm lines for simplicity */
3721 list_for_each_entry_safe(dl, tmp_dl, ¬es->src->source, al.node) {
3722 if (dl->al.offset == -1) {
3723 list_del(&dl->al.node);
3724 free(dl);
3725 }
3726 }
3727}
3728
/*
 * Find the disasm_line for address @ip within @sym's annotated source.
 * Returns NULL when no instruction line matches.
 */
static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
{
	struct disasm_line *dl;
	struct annotation *notes;

	notes = symbol__annotation(sym);

	list_for_each_entry(dl, &notes->src->source, al.node) {
		if (sym->start + dl->al.offset == ip) {
			/*
			 * llvm-objdump places "lock" in a separate line and
			 * in that case, we want to get the next line.
			 */
			if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') {
				/* Bump ip so the following line matches instead. */
				ip++;
				continue;
			}
			return dl;
		}
	}
	return NULL;
}
3751
3752static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
3753 const char *name)
3754{
3755 struct annotated_item_stat *istat;
3756
3757 list_for_each_entry(istat, head, list) {
3758 if (!strcmp(istat->name, name))
3759 return istat;
3760 }
3761
3762 istat = zalloc(sizeof(*istat));
3763 if (istat == NULL)
3764 return NULL;
3765
3766 istat->name = strdup(name);
3767 if (istat->name == NULL) {
3768 free(istat);
3769 return NULL;
3770 }
3771
3772 list_add_tail(&istat->list, head);
3773 return istat;
3774}
3775
3776static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
3777{
3778 if (arch__is(arch, "x86")) {
3779 if (!strncmp(dl->ins.name, "push", 4) ||
3780 !strncmp(dl->ins.name, "pop", 3) ||
3781 !strncmp(dl->ins.name, "ret", 3))
3782 return true;
3783 }
3784
3785 return false;
3786}
3787
3788u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
3789 struct disasm_line *dl)
3790{
3791 struct annotation *notes;
3792 struct disasm_line *next;
3793 u64 addr;
3794
3795 notes = symbol__annotation(ms->sym);
3796 /*
3797 * PC-relative addressing starts from the next instruction address
3798 * But the IP is for the current instruction. Since disasm_line
3799 * doesn't have the instruction size, calculate it using the next
3800 * disasm_line. If it's the last one, we can use symbol's end
3801 * address directly.
3802 */
3803 if (&dl->al.node == notes->src->source.prev)
3804 addr = ms->sym->end + offset;
3805 else {
3806 next = list_next_entry(dl, al.node);
3807 addr = ip + (next->al.offset - dl->al.offset) + offset;
3808 }
3809 return map__rip_2objdump(ms->map, addr);
3810}
3811
3812/**
3813 * hist_entry__get_data_type - find data type for given hist entry
3814 * @he: hist entry
3815 *
3816 * This function first annotates the instruction at @he->ip and extracts
3817 * register and offset info from it. Then it searches the DWARF debug
3818 * info to get a variable and type information using the address, register,
3819 * and offset.
3820 */
3821struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
3822{
3823 struct map_symbol *ms = &he->ms;
3824 struct evsel *evsel = hists_to_evsel(he->hists);
3825 struct arch *arch;
3826 struct disasm_line *dl;
3827 struct annotated_insn_loc loc;
3828 struct annotated_op_loc *op_loc;
3829 struct annotated_data_type *mem_type;
3830 struct annotated_item_stat *istat;
3831 u64 ip = he->ip, addr = 0;
3832 const char *var_name = NULL;
3833 int var_offset;
3834 int i;
3835
3836 ann_data_stat.total++;
3837
3838 if (ms->map == NULL || ms->sym == NULL) {
3839 ann_data_stat.no_sym++;
3840 return NULL;
3841 }
3842
3843 if (!symbol_conf.init_annotation) {
3844 ann_data_stat.no_sym++;
3845 return NULL;
3846 }
3847
3848 if (evsel__get_arch(evsel, &arch) < 0) {
3849 ann_data_stat.no_insn++;
3850 return NULL;
3851 }
3852
3853 /* Make sure it runs objdump to get disasm of the function */
3854 symbol__ensure_annotate(ms, evsel);
3855
3856 /*
3857 * Get a disasm to extract the location from the insn.
3858 * This is too slow...
3859 */
3860 dl = find_disasm_line(ms->sym, ip);
3861 if (dl == NULL) {
3862 ann_data_stat.no_insn++;
3863 return NULL;
3864 }
3865
3866retry:
3867 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
3868 if (istat == NULL) {
3869 ann_data_stat.no_insn++;
3870 return NULL;
3871 }
3872
3873 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
3874 ann_data_stat.no_insn_ops++;
3875 istat->bad++;
3876 return NULL;
3877 }
3878
3879 if (is_stack_operation(arch, dl)) {
3880 istat->good++;
3881 he->mem_type_off = 0;
3882 return &stackop_type;
3883 }
3884
3885 for_each_insn_op_loc(&loc, i, op_loc) {
3886 if (!op_loc->mem_ref)
3887 continue;
3888
3889 /* Recalculate IP because of LOCK prefix or insn fusion */
3890 ip = ms->sym->start + dl->al.offset;
3891
3892 var_offset = op_loc->offset;
3893
3894 /* PC-relative addressing */
3895 if (op_loc->reg1 == DWARF_REG_PC) {
3896 struct addr_location al;
3897 struct symbol *var;
3898 u64 map_addr;
3899
3900 addr = annotate_calc_pcrel(ms, ip, op_loc->offset, dl);
3901 /* Kernel symbols might be relocated */
3902 map_addr = addr + map__reloc(ms->map);
3903
3904 addr_location__init(&al);
3905 var = thread__find_symbol_fb(he->thread, he->cpumode,
3906 map_addr, &al);
3907 if (var) {
3908 var_name = var->name;
3909 /* Calculate type offset from the start of variable */
3910 var_offset = map_addr - map__unmap_ip(al.map, var->start);
3911 }
3912 addr_location__exit(&al);
3913 }
3914
3915 mem_type = find_data_type(ms, ip, op_loc, addr, var_name);
3916 if (mem_type)
3917 istat->good++;
3918 else
3919 istat->bad++;
3920
3921 if (mem_type && var_name)
3922 op_loc->offset = var_offset;
3923
3924 if (symbol_conf.annotate_data_sample) {
3925 annotated_data_type__update_samples(mem_type, evsel,
3926 op_loc->offset,
3927 he->stat.nr_events,
3928 he->stat.period);
3929 }
3930 he->mem_type_off = op_loc->offset;
3931 return mem_type;
3932 }
3933
3934 /*
3935 * Some instructions can be fused and the actual memory access came
3936 * from the previous instruction.
3937 */
3938 if (dl->al.offset > 0) {
3939 struct disasm_line *prev_dl;
3940
3941 prev_dl = list_prev_entry(dl, al.node);
3942 if (ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
3943 dl = prev_dl;
3944 goto retry;
3945 }
3946 }
3947
3948 ann_data_stat.no_mem_ops++;
3949 istat->bad++;
3950 return NULL;
3951}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9#include <errno.h>
10#include <inttypes.h>
11#include <libgen.h>
12#include <stdlib.h>
13#include "util.h" // hex_width()
14#include "ui/ui.h"
15#include "sort.h"
16#include "build-id.h"
17#include "color.h"
18#include "config.h"
19#include "disasm.h"
20#include "dso.h"
21#include "env.h"
22#include "map.h"
23#include "maps.h"
24#include "symbol.h"
25#include "srcline.h"
26#include "units.h"
27#include "debug.h"
28#include "debuginfo.h"
29#include "annotate.h"
30#include "annotate-data.h"
31#include "evsel.h"
32#include "evlist.h"
33#include "bpf-event.h"
34#include "bpf-utils.h"
35#include "block-range.h"
36#include "string2.h"
37#include "dwarf-regs.h"
38#include "util/event.h"
39#include "util/sharded_mutex.h"
40#include "arch/common.h"
41#include "namespaces.h"
42#include "thread.h"
43#include "hashmap.h"
44#include "strbuf.h"
45#include <regex.h>
46#include <linux/bitops.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/zalloc.h>
50#include <subcmd/parse-options.h>
51#include <subcmd/run-command.h>
52#include <math.h>
53
54/* FIXME: For the HE_COLORSET */
55#include "ui/browser.h"
56
57/*
58 * FIXME: Using the same values as slang.h,
59 * but that header may not be available everywhere
60 */
61#define LARROW_CHAR ((unsigned char)',')
62#define RARROW_CHAR ((unsigned char)'+')
63#define DARROW_CHAR ((unsigned char)'.')
64#define UARROW_CHAR ((unsigned char)'-')
65
66#include <linux/ctype.h>
67
68/* global annotation options */
69struct annotation_options annotate_opts;
70
71/* Data type collection debug statistics */
72struct annotated_data_stat ann_data_stat;
73LIST_HEAD(ann_insn_stat);
74
75/* Pseudo data types */
76struct annotated_data_type stackop_type = {
77 .self = {
78 .type_name = (char *)"(stack operation)",
79 .children = LIST_HEAD_INIT(stackop_type.self.children),
80 },
81};
82
83struct annotated_data_type canary_type = {
84 .self = {
85 .type_name = (char *)"(stack canary)",
86 .children = LIST_HEAD_INIT(canary_type.self.children),
87 },
88};
89
/*
 * Symbol histogram hashmap: key = offset << 16 | evsel->core.idx.
 * The hash mixes the two halves so nearby offsets spread across buckets.
 */
static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
{
	return (key >> 16) + (key & 0xffff);
}
95
/* Keys are plain packed integers: equality is direct comparison. */
static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}
100
101static struct annotated_source *annotated_source__new(void)
102{
103 struct annotated_source *src = zalloc(sizeof(*src));
104
105 if (src != NULL)
106 INIT_LIST_HEAD(&src->source);
107
108 return src;
109}
110
/*
 * Free an annotated_source: every per-(offset, event) sample entry, the
 * sample hashmap, the histogram array, then the container.  NULL is OK.
 */
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (src == NULL)
		return;

	if (src->samples) {
		/* Values are heap-allocated entries; free each before the map. */
		hashmap__for_each_entry(src->samples, cur, bkt)
			zfree(&cur->pvalue);
		hashmap__free(src->samples);
	}
	zfree(&src->histograms);
	free(src);
}
127
128static int annotated_source__alloc_histograms(struct annotated_source *src,
129 int nr_hists)
130{
131 src->nr_histograms = nr_hists;
132 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
133
134 if (src->histograms == NULL)
135 return -1;
136
137 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
138 if (src->samples == NULL)
139 zfree(&src->histograms);
140
141 return src->histograms ? 0 : -1;
142}
143
/*
 * Reset all collected sample histograms and branch cycle data for @sym.
 * Runs under the annotation lock since samples may arrive concurrently.
 */
void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	annotation__lock(notes);
	if (notes->src != NULL) {
		/*
		 * NOTE(review): histograms/samples may be NULL when their
		 * allocation failed while nr_histograms was already set -
		 * confirm annotated_source__alloc_histograms() cannot leave
		 * that combination behind before trusting this memset.
		 */
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
		hashmap__clear(notes->src->samples);
	}
	if (notes->branch && notes->branch->cycles_hist) {
		memset(notes->branch->cycles_hist, 0,
		       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	annotation__unlock(notes);
}
160
/*
 * Fold one observed basic block (ending at @offset, optionally starting
 * at @start when @have_start) and its @cycles into the histogram slot
 * ch[@offset].  Always returns 0.
 */
static int __symbol__account_cycles(struct cyc_hist *ch,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (cycles > ch[offset].cycles_max)
		ch[offset].cycles_max = cycles;

	/* Track the smallest non-zero cycle count seen so far. */
	if (ch[offset].cycles_min) {
		if (cycles && cycles < ch[offset].cycles_min)
			ch[offset].cycles_min = cycles;
	} else
		ch[offset].cycles_min = cycles;

	/* A start-less block never replaces one with a known start. */
	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			/* A longer block wins: restart per-block accounting. */
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			return 0;
	}

	if (ch[offset].num < NUM_SPARKS)
		ch[offset].cycles_spark[ch[offset].num] = cycles;

	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}
210
211static int __symbol__inc_addr_samples(struct map_symbol *ms,
212 struct annotated_source *src, int evidx, u64 addr,
213 struct perf_sample *sample)
214{
215 struct symbol *sym = ms->sym;
216 long hash_key;
217 u64 offset;
218 struct sym_hist *h;
219 struct sym_hist_entry *entry;
220
221 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
222
223 if ((addr < sym->start || addr >= sym->end) &&
224 (addr != sym->end || sym->start != sym->end)) {
225 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
226 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
227 return -ERANGE;
228 }
229
230 offset = addr - sym->start;
231 h = annotated_source__histogram(src, evidx);
232 if (h == NULL) {
233 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
234 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
235 return -ENOMEM;
236 }
237
238 hash_key = offset << 16 | evidx;
239 if (!hashmap__find(src->samples, hash_key, &entry)) {
240 entry = zalloc(sizeof(*entry));
241 if (entry == NULL)
242 return -ENOMEM;
243
244 if (hashmap__add(src->samples, hash_key, entry) < 0)
245 return -ENOMEM;
246 }
247
248 h->nr_samples++;
249 h->period += sample->period;
250 entry->nr_samples++;
251 entry->period += sample->period;
252
253 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
254 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
255 sym->start, sym->name, addr, addr - sym->start, evidx,
256 entry->nr_samples, entry->period);
257 return 0;
258}
259
260struct annotated_branch *annotation__get_branch(struct annotation *notes)
261{
262 if (notes == NULL)
263 return NULL;
264
265 if (notes->branch == NULL)
266 notes->branch = zalloc(sizeof(*notes->branch));
267
268 return notes->branch;
269}
270
/*
 * Get (allocating lazily) the branch data for @sym: the per-byte cycles
 * histogram and, when @br_cntr_nr != 0, the branch counter array with
 * @br_cntr_nr slots per symbol byte.  Returns NULL on allocation failure.
 */
static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym,
							 unsigned int br_cntr_nr)
{
	struct annotation *notes = symbol__annotation(sym);
	struct annotated_branch *branch;
	const size_t size = symbol__size(sym);

	branch = annotation__get_branch(notes);
	if (branch == NULL)
		return NULL;

	if (branch->cycles_hist == NULL) {
		branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
		if (!branch->cycles_hist)
			return NULL;
	}

	if (br_cntr_nr && branch->br_cntr == NULL) {
		branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64));
		if (!branch->br_cntr)
			return NULL;
	}

	return branch;
}
296
297struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
298{
299 struct annotation *notes = symbol__annotation(sym);
300
301 if (notes->src == NULL) {
302 notes->src = annotated_source__new();
303 if (notes->src == NULL)
304 return NULL;
305 goto alloc_histograms;
306 }
307
308 if (notes->src->histograms == NULL) {
309alloc_histograms:
310 annotated_source__alloc_histograms(notes->src, nr_hists);
311 }
312
313 return notes->src;
314}
315
316static int symbol__inc_addr_samples(struct map_symbol *ms,
317 struct evsel *evsel, u64 addr,
318 struct perf_sample *sample)
319{
320 struct symbol *sym = ms->sym;
321 struct annotated_source *src;
322
323 if (sym == NULL)
324 return 0;
325 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
326 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
327}
328
/*
 * Accumulate the per-branch-counter readings packed in @br_cntr into
 * the symbol's branch counter array at instruction byte @offset.  Each
 * counter occupies @width bits; an all-ones reading is marked saturated.
 * NOTE(review): assumes width < 32 so "(1L << width) - 1" cannot
 * overflow on 32-bit long - confirm against perf_env__find_br_cntr_info().
 */
static int symbol__account_br_cntr(struct annotated_branch *branch,
				   struct evsel *evsel,
				   unsigned offset,
				   u64 br_cntr)
{
	unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr;
	unsigned int base = evsel__leader(evsel)->br_cntr_idx;
	unsigned int off = offset * evsel->evlist->nr_br_cntr;
	u64 *branch_br_cntr = branch->br_cntr;
	unsigned int i, mask, width;

	if (!br_cntr || !branch_br_cntr)
		return 0;

	perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
	mask = (1L << width) - 1;
	for (i = 0; i < br_cntr_nr; i++) {
		/* Extract the i-th counter field from the packed value. */
		u64 cntr = (br_cntr >> i * width) & mask;

		branch_br_cntr[off + i + base] += cntr;
		if (cntr == mask)
			branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG;
	}

	return 0;
}
355
/*
 * Validate the basic block ending at @addr (optionally starting at
 * @start; 0 means unknown) against @sym's boundaries and account
 * @cycles and @br_cntr at addr's offset.
 * Returns 0, -ENOMEM on allocation failure, or -ERANGE when an address
 * falls outside the symbol.
 */
static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym,
				  unsigned cycles, struct evsel *evsel,
				  u64 br_cntr)
{
	struct annotated_branch *branch;
	unsigned offset;
	int ret;

	if (sym == NULL)
		return 0;
	branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr);
	if (!branch)
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		/* A block cannot end before it starts; drop the start info. */
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
	ret = __symbol__account_cycles(branch->cycles_hist,
				       start ? start - sym->start : 0,
				       offset, cycles,
				       !!start);

	if (ret)
		return ret;

	return symbol__account_br_cntr(branch, evsel, offset, br_cntr);
}
389
/*
 * Account @cycles for the basic block ending at @ams, started at @start
 * (may be NULL).  Errors from the symbol accounting are logged at debug
 * level and returned.
 */
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles,
				    struct evsel *evsel,
				    u64 br_cntr)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
		(start->ms.sym == ams->ms.sym ||
		 (ams->ms.sym &&
		  start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}
425
426struct annotation_line *annotated_source__get_line(struct annotated_source *src,
427 s64 offset)
428{
429 struct annotation_line *al;
430
431 list_for_each_entry(al, &src->source, node) {
432 if (al->offset == offset)
433 return al;
434 }
435 return NULL;
436}
437
438static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
439{
440 struct annotation_line *al;
441 unsigned n_insn = 0;
442
443 al = annotated_source__get_line(notes->src, start);
444 if (al == NULL)
445 return 0;
446
447 list_for_each_entry_from(al, ¬es->src->source, node) {
448 if (al->offset == -1)
449 continue;
450 if ((u64)al->offset > end)
451 break;
452 n_insn++;
453 }
454 return n_insn;
455}
456
457static void annotated_branch__delete(struct annotated_branch *branch)
458{
459 if (branch) {
460 zfree(&branch->cycles_hist);
461 free(branch->br_cntr);
462 free(branch);
463 }
464}
465
/*
 * Compute the IPC of the basic block [start, end] from @ch and stamp it
 * on every instruction line in the block that doesn't have one yet.
 * Lines covered here also feed the symbol-wide coverage counters.
 */
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
	unsigned n_insn;
	unsigned int cover_insn = 0;

	n_insn = annotation__count_insn(notes, start, end);
	if (n_insn && ch->num && ch->cycles) {
		struct annotation_line *al;
		struct annotated_branch *branch;
		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);

		/* Hide data when there are too many overlaps. */
		if (ch->reset >= 0x7fff)
			return;

		al = annotated_source__get_line(notes->src, start);
		if (al == NULL)
			return;

		list_for_each_entry_from(al, &notes->src->source, node) {
			if (al->offset == -1)
				continue;
			if ((u64)al->offset > end)
				break;
			/* First block to cover a line wins; don't overwrite. */
			if (al->cycles && al->cycles->ipc == 0.0) {
				al->cycles->ipc = ipc;
				cover_insn++;
			}
		}

		branch = annotation__get_branch(notes);
		if (cover_insn && branch) {
			branch->hit_cycles += ch->cycles;
			branch->hit_insn += n_insn * ch->num;
			branch->cover_insn += cover_insn;
		}
	}
}
504
/*
 * Walk the cycles histogram from the highest offset down and attach
 * cycle/IPC/branch-counter data to each annotation line.  Runs under
 * the annotation lock.
 *
 * NOTE(review): on allocation failure the unwind loop below frees the
 * partially attached data and err is set to (positive) ENOMEM, yet the
 * function still returns 0 - confirm this "best effort, never fail"
 * behaviour is intended before relying on the return value.
 */
static int annotation__compute_ipc(struct annotation *notes, size_t size,
				   struct evsel *evsel)
{
	unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr;
	int err = 0;
	s64 offset;

	if (!notes->branch || !notes->branch->cycles_hist)
		return 0;

	notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
	notes->branch->hit_cycles = 0;
	notes->branch->hit_insn = 0;
	notes->branch->cover_insn = 0;

	annotation__lock(notes);
	/* Walk backwards so block ends are processed before their starts. */
	for (offset = size - 1; offset >= 0; --offset) {
		struct cyc_hist *ch;

		ch = &notes->branch->cycles_hist[offset];
		if (ch && ch->cycles) {
			struct annotation_line *al;

			al = annotated_source__get_line(notes->src, offset);
			if (al && al->cycles == NULL) {
				al->cycles = zalloc(sizeof(*al->cycles));
				if (al->cycles == NULL) {
					err = ENOMEM;
					break;
				}
			}
			if (ch->have_start)
				annotation__count_and_fill(notes, ch->start, offset, ch);
			if (al && ch->num_aggr) {
				al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
				al->cycles->max = ch->cycles_max;
				al->cycles->min = ch->cycles_min;
			}
			if (al && notes->branch->br_cntr) {
				if (!al->br_cntr) {
					al->br_cntr = calloc(br_cntr_nr, sizeof(u64));
					if (!al->br_cntr) {
						err = ENOMEM;
						break;
					}
				}
				al->num_aggr = ch->num_aggr;
				al->br_cntr_nr = br_cntr_nr;
				al->evsel = evsel;
				memcpy(al->br_cntr, &notes->branch->br_cntr[offset * br_cntr_nr],
				       br_cntr_nr * sizeof(u64));
			}
		}
	}

	/* Unwind: free everything attached before the failure point. */
	if (err) {
		while (++offset < (s64)size) {
			struct cyc_hist *ch = &notes->branch->cycles_hist[offset];

			if (ch && ch->cycles) {
				struct annotation_line *al;

				al = annotated_source__get_line(notes->src, offset);
				if (al) {
					zfree(&al->cycles);
					zfree(&al->br_cntr);
				}
			}
		}
	}

	annotation__unlock(notes);
	return 0;
}
579
/* Account @sample at @ams->al_addr for @evsel on the ams symbol. */
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
				 struct evsel *evsel)
{
	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
}
585
/* Account @sample at @ip for @evsel on the hist entry's symbol. */
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
				 struct evsel *evsel, u64 ip)
{
	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
}
591
592
/* Release everything owned by @notes: disasm source and branch data. */
void annotation__exit(struct annotation *notes)
{
	annotated_source__delete(notes->src);
	annotated_branch__delete(notes->branch);
}
598
599static struct sharded_mutex *sharded_mutex;
600
/* One-time setup: allocate one mutex shard per present CPU. */
static void annotation__init_sharded_mutex(void)
{
	/* As many mutexes as there are CPUs. */
	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
}
606
/* Hash an annotation by its address to pick a mutex shard. */
static size_t annotation__hash(const struct annotation *notes)
{
	return (size_t)notes;
}
611
/*
 * Return the shard mutex guarding @notes, lazily initializing the shard
 * pool.  Returns NULL when shard allocation failed; locking then
 * degrades to a no-op (see annotation__lock/unlock).
 */
static struct mutex *annotation__get_mutex(const struct annotation *notes)
{
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	pthread_once(&once, annotation__init_sharded_mutex);
	if (!sharded_mutex)
		return NULL;

	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
}
622
/* Take the shard mutex for @notes; silently a no-op if shards are absent. */
void annotation__lock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_lock(mutex);
}
631
/* Release the shard mutex for @notes; no-op if shards are absent. */
void annotation__unlock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_unlock(mutex);
}
640
/*
 * Try to take the annotation lock without blocking.  Returns false when
 * the lock is contended or when no mutex could be allocated.
 */
bool annotation__trylock(struct annotation *notes)
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (!mutex)
		return false;

	return mutex_trylock(mutex);
}
650
/* Append @al to the tail of @head, preserving disassembly order. */
void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
	list_add_tail(&al->node, head);
}
655
/*
 * Return the next real instruction line after @pos (offset >= 0),
 * skipping source/comment lines, or NULL at the end of @head.
 */
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
665
666static const char *annotate__address_color(struct block_range *br)
667{
668 double cov = block_range__coverage(br);
669
670 if (cov >= 0) {
671 /* mark red for >75% coverage */
672 if (cov > 0.75)
673 return PERF_COLOR_RED;
674
675 /* mark dull for <1% coverage */
676 if (cov < 0.01)
677 return PERF_COLOR_NORMAL;
678 }
679
680 return PERF_COLOR_MAGENTA;
681}
682
683static const char *annotate__asm_color(struct block_range *br)
684{
685 double cov = block_range__coverage(br);
686
687 if (cov >= 0) {
688 /* mark dull for <1% coverage */
689 if (cov < 0.01)
690 return PERF_COLOR_NORMAL;
691 }
692
693 return PERF_COLOR_BLUE;
694}
695
/*
 * Print branch coverage annotations after a disasm line: "+N%" when the
 * line at @addr is a jump target (share of flow joining here), and
 * "-N% (p:M%)" when it is a branch (share leaving, with prediction rate).
 * NOTE(review): divides by branch->coverage / br->coverage with no zero
 * check - presumably coverage > 0 whenever is_target/is_branch is set;
 * confirm in block-range.c.
 */
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
		}
	}
}
747
/*
 * Print one disassembly line to stdout: colored address, colored asm
 * text and the branch-coverage comment, based on the block range
 * covering the instruction's address.
 */
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
{
	s64 offset = dl->al.offset;
	const u64 addr = start + offset;
	struct block_range *br;

	br = block_range__find(addr);
	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
	annotate__branch_printf(br, addr);
	return 0;
}
760
/*
 * Print one annotation line: per-event percentages (or periods/sample
 * counts), then the disassembly, flushing any queued context lines
 * first when --context is in effect.
 *
 * Returns 0 when printed, 1 when filtered by @max_lines and -1 when
 * filtered by @min_pcnt (or a non-asm line while context is queued).
 */
static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
		       int percent_type)
{
	struct disasm_line *dl = container_of(al, struct disasm_line, al);
	struct annotation *notes = symbol__annotation(sym);
	/* Dedup filename:line across consecutive lines (persists per run). */
	static const char *prev_line;

	if (al->offset != -1) {
		double max_percent = 0.0;
		int i, nr_percent = 1;
		const char *color;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   percent_type);

			if (percent > max_percent)
				max_percent = percent;
		}

		if (al->data_nr > nr_percent)
			nr_percent = al->data_nr;

		if (max_percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		if (queue != NULL) {
			/* Flush queued context lines preceding this one. */
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == al)
					break;
				annotation_line__print(queue, sym, start, evsel, len,
						       0, 0, 1, NULL, addr_fmt_width,
						       percent_type);
			}
		}

		color = get_percent_color(max_percent);

		for (i = 0; i < nr_percent; i++) {
			struct annotation_data *data = &al->data[i];
			double percent;

			percent = annotation_data__percent(data, percent_type);
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %11" PRIu64,
					      data->he.period);
			else if (symbol_conf.show_nr_samples)
				color_fprintf(stdout, color, " %7" PRIu64,
					      data->he.nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		disasm_line__print(dl, start, addr_fmt_width);

		/*
		 * Also color the filename and line if needed, with
		 * the same color than the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (al->path) {
			if (!prev_line || strcmp(prev_line, al->path)) {
				color_fprintf(stdout, color, " // %s", al->path);
				prev_line = al->path;
			}
		}

		printf("\n");
	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		int width = annotation__pcnt_width(notes);

		if (queue)
			return -1;

		if (!*al->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
	}

	return 0;
}
857
/*
 * Sum the hits/period over the byte range [offset, end) of @evsel's
 * histogram and fill @data with all four percentage flavors (hits and
 * period, each local to the symbol and global to the session).
 */
static void calc_percent(struct annotation *notes,
			 struct evsel *evsel,
			 struct annotation_data *data,
			 s64 offset, s64 end)
{
	struct hists *hists = evsel__hists(evsel);
	int evidx = evsel->core.idx;
	struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
	unsigned int hits = 0;
	u64 period = 0;

	while (offset < end) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evidx, offset);
		if (entry) {
			hits += entry->nr_samples;
			period += entry->period;
		}
		++offset;
	}

	if (sym_hist->nr_samples) {
		data->he.period     = period;
		data->he.nr_samples = hits;
		data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
	}

	if (hists->stats.nr_non_filtered_samples)
		data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;

	if (sym_hist->period)
		data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;

	if (hists->stats.total_period)
		data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
}
895
/*
 * For every asm line, compute per-event percentages over the byte range
 * [this line, next asm line) for each evsel in @leader's group.
 * Events with no samples are skipped when skip_empty is set.
 */
static void annotation__calc_percent(struct annotation *notes,
				     struct evsel *leader, s64 len)
{
	struct annotation_line *al, *next;
	struct evsel *evsel;

	list_for_each_entry(al, &notes->src->source, node) {
		s64 end;
		int i = 0;

		if (al->offset == -1)
			continue;

		/* The line's range ends at the next asm line (or symbol end). */
		next = annotation_line__next(al, &notes->src->source);
		end = next ? next->offset : len;

		for_each_group_evsel(evsel, leader) {
			struct annotation_data *data;

			BUG_ON(i >= al->data_nr);

			if (symbol_conf.skip_empty &&
			    evsel__hists(evsel)->stats.nr_samples == 0)
				continue;

			data = &al->data[i++];

			calc_percent(notes, evsel, data, al->offset, end);
		}
	}
}
927
/* Compute per-line, per-event percentages for @sym's annotation. */
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);

	annotation__calc_percent(notes, evsel, symbol__size(sym));
}
934
935static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
936{
937 struct perf_env *env = evsel__env(evsel);
938 const char *arch_name = perf_env__arch(env);
939 struct arch *arch;
940 int err;
941
942 if (!arch_name) {
943 *parch = NULL;
944 return errno;
945 }
946
947 *parch = arch = arch__find(arch_name);
948 if (arch == NULL) {
949 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
950 return ENOTSUP;
951 }
952
953 if (arch->init) {
954 err = arch->init(arch, env ? env->cpuid : NULL);
955 if (err) {
956 pr_err("%s: failed to initialize %s arch priv area\n",
957 __func__, arch->name);
958 return err;
959 }
960 }
961 return 0;
962}
963
964int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
965 struct arch **parch)
966{
967 struct symbol *sym = ms->sym;
968 struct annotation *notes = symbol__annotation(sym);
969 struct annotate_args args = {
970 .evsel = evsel,
971 .options = &annotate_opts,
972 };
973 struct arch *arch = NULL;
974 int err, nr;
975
976 err = evsel__get_arch(evsel, &arch);
977 if (err < 0)
978 return err;
979
980 if (parch)
981 *parch = arch;
982
983 if (notes->src && !list_empty(¬es->src->source))
984 return 0;
985
986 args.arch = arch;
987 args.ms = *ms;
988
989 if (notes->src == NULL) {
990 notes->src = annotated_source__new();
991 if (notes->src == NULL)
992 return -1;
993 }
994
995 nr = 0;
996 if (evsel__is_group_event(evsel)) {
997 struct evsel *pos;
998
999 for_each_group_evsel(pos, evsel) {
1000 if (symbol_conf.skip_empty &&
1001 evsel__hists(pos)->stats.nr_samples == 0)
1002 continue;
1003 nr++;
1004 }
1005 }
1006 notes->src->nr_events = nr ? nr : 1;
1007
1008 if (annotate_opts.full_addr)
1009 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
1010 else
1011 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
1012
1013 return symbol__disassemble(sym, &args);
1014}
1015
/*
 * Insert @al into @root keyed by its source path (file:line).  When a
 * node with the same path already exists, accumulate the per-event
 * percentages into it instead of inserting a duplicate.
 */
static void insert_source_line(struct rb_root *root, struct annotation_line *al)
{
	struct annotation_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	unsigned int percent_type = annotate_opts.percent_type;
	int i, ret;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct annotation_line, rb_node);

		ret = strcmp(iter->path, al->path);
		if (ret == 0) {
			/* Same source line: merge percentages and bail out. */
			for (i = 0; i < al->data_nr; i++) {
				iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
										      percent_type);
			}
			return;
		}

		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* First occurrence of this path: seed percent_sum. */
	for (i = 0; i < al->data_nr; i++) {
		al->data[i].percent_sum = annotation_data__percent(&al->data[i],
								   percent_type);
	}

	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}
1051
1052static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
1053{
1054 int i;
1055
1056 for (i = 0; i < a->data_nr; i++) {
1057 if (a->data[i].percent_sum == b->data[i].percent_sum)
1058 continue;
1059 return a->data[i].percent_sum > b->data[i].percent_sum;
1060 }
1061
1062 return 0;
1063}
1064
/* Insert @al into @root ordered by descending percent sums (no dedup). */
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
	struct annotation_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct annotation_line, rb_node);

		if (cmp_source_line(al, iter))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}
1084
1085static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1086{
1087 struct annotation_line *al;
1088 struct rb_node *node;
1089
1090 node = rb_first(src_root);
1091 while (node) {
1092 struct rb_node *next;
1093
1094 al = rb_entry(node, struct annotation_line, rb_node);
1095 next = rb_next(node);
1096 rb_erase(node, src_root);
1097
1098 __resort_source_line(dest_root, al);
1099 node = next;
1100 }
1101}
1102
/*
 * Print the sorted "-l" source-line summary: per-event percentage sums
 * colored by heat, followed by the file:line path, for every line in
 * @root.
 */
static void print_summary(struct rb_root *root, const char *filename)
{
	struct annotation_line *al;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		al = rb_entry(node, struct annotation_line, rb_node);
		for (i = 0; i < al->data_nr; i++) {
			percent = al->data[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			if (percent > percent_max)
				percent_max = percent;
		}

		/* Path colored by the hottest event's percentage. */
		path = al->path;
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}
1140
/*
 * Verbose dump of raw per-offset sample counts for @sym and @evsel,
 * followed by the histogram total.
 */
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
	int evidx = evsel->core.idx;
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evidx, offset);
		if (entry && entry->nr_samples != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, entry->nr_samples);
	}
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
1158
/*
 * Character width needed to print the highest instruction address, used
 * to right-align addresses.  Scans @lines from the tail for the last
 * real asm line (offset != -1); returns 0 when there is none.
 */
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
	char bf[32];
	struct annotation_line *line;

	list_for_each_entry_reverse(line, lines, node) {
		if (line->offset != -1)
			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
	}

	return 0;
}
1171
/*
 * Print the full stdio annotation for @ms->sym: a header line, then
 * every annotation line filtered by min_pcnt/max_lines, maintaining a
 * context queue when --context is in effect.
 *
 * Returns the number of lines suppressed by max_lines ("more"), or
 * -ENOMEM on allocation failure.
 */
int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
{
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct dso *dso = map__dso(map);
	char *filename;
	const char *d_filename;
	const char *evsel_name = evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
	struct annotation_line *pos, *queue = NULL;
	struct annotation_options *opts = &annotate_opts;
	u64 start = map__rip_2objdump(map, sym->start);
	int printed = 2, queue_len = 0, addr_fmt_width;
	int more = 0;
	bool context = opts->context;
	u64 len;
	int width = annotation__pcnt_width(notes);
	int graph_dotted_len;
	char buf[512];

	/* basename() may modify its argument, so work on a copy. */
	filename = strdup(dso__long_name(dso));
	if (!filename)
		return -ENOMEM;

	if (opts->full_path)
		d_filename = filename;
	else
		d_filename = basename(filename);

	len = symbol__size(sym);

	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, sizeof(buf));
		evsel_name = buf;
	}

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
				  "percent: %s)\n",
				  width, width, symbol_conf.show_total_period ? "Period" :
				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
				  d_filename, evsel_name, h->nr_samples,
				  percent_type_str(opts->percent_type));

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose > 0)
		symbol__annotate_hits(sym, evsel);

	addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);

	list_for_each_entry(pos, &notes->src->source, node) {
		int err;

		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		err = annotation_line__print(pos, sym, start, evsel, len,
					     opts->min_pcnt, printed, opts->max_lines,
					     queue, addr_fmt_width, opts->percent_type);

		switch (err) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	free(filename);

	return more;
}
1269
/* No-op color callback: plain FILE output carries no colors. */
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
{
}
1275
/* No-op jump-color callback for plain FILE output. */
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
{
	return 0;
}
1281
/* No-op color callback for plain FILE output. */
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
	return 0;
}
1286
/* printf-style callback writing to the FILE passed as opaque @fp. */
static void FILE__printf(void *fp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(fp, fmt, args);
	va_end(args);
}
1295
1296static void FILE__write_graph(void *fp, int graph)
1297{
1298 const char *s;
1299 switch (graph) {
1300
1301 case DARROW_CHAR: s = "↓"; break;
1302 case UARROW_CHAR: s = "↑"; break;
1303 case LARROW_CHAR: s = "←"; break;
1304 case RARROW_CHAR: s = "→"; break;
1305 default: s = "?"; break;
1306 }
1307
1308 fputs(s, fp);
1309}
1310
/*
 * Write the annotation for @sym to @fp using the plain-FILE callbacks
 * (no colors, UTF-8 arrows), honoring the current line filter.
 */
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
{
	struct annotation *notes = symbol__annotation(sym);
	struct annotation_write_ops wops = {
		.first_line		 = true,
		.obj			 = fp,
		.set_color		 = FILE__set_color,
		.set_percent_color	 = FILE__set_percent_color,
		.set_jumps_percent_color = FILE__set_jumps_percent_color,
		.printf			 = FILE__printf,
		.write_graph		 = FILE__write_graph,
	};
	struct annotation_line *al;

	list_for_each_entry(al, &notes->src->source, node) {
		if (annotation_line__filter(al))
			continue;
		annotation_line__write(al, notes, &wops);
		fputc('\n', fp);
		wops.first_line = false;
	}

	return 0;
}
1335
1336int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
1337{
1338 const char *ev_name = evsel__name(evsel);
1339 char buf[1024];
1340 char *filename;
1341 int err = -1;
1342 FILE *fp;
1343
1344 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
1345 return -1;
1346
1347 fp = fopen(filename, "w");
1348 if (fp == NULL)
1349 goto out_free_filename;
1350
1351 if (evsel__is_group_event(evsel)) {
1352 evsel__group_desc(evsel, buf, sizeof(buf));
1353 ev_name = buf;
1354 }
1355
1356 fprintf(fp, "%s() %s\nEvent: %s\n\n",
1357 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
1358 symbol__annotate_fprintf2(ms->sym, fp);
1359
1360 fclose(fp);
1361 err = 0;
1362out_free_filename:
1363 free(filename);
1364 return err;
1365}
1366
/*
 * Zero the sample/period totals of @sym's histogram(s).
 *
 * NOTE(review): the memset spans nr_histograms entries starting at the
 * @evidx'th histogram, so for evidx > 0 it would write past the end of
 * the histograms allocation — confirm callers only pass evidx == 0, or
 * whether this should clear sizeof(*h) only.
 */
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);

	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
}
1374
/*
 * Age @sym's histogram for event @evidx: scale every per-offset sample
 * count by 7/8 and rebuild the total, so stale samples fade out in
 * continuously-updating (top-like) modes.
 */
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	struct annotation_line *al;

	h->nr_samples = 0;
	list_for_each_entry(al, &notes->src->source, node) {
		struct sym_hist_entry *entry;

		if (al->offset == -1)
			continue;

		entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
		if (entry == NULL)
			continue;

		entry->nr_samples = entry->nr_samples * 7 / 8;
		h->nr_samples += entry->nr_samples;
	}
}
1396
/* Unlink and free every disasm line held by @as. */
void annotated_source__purge(struct annotated_source *as)
{
	struct annotation_line *al, *n;

	list_for_each_entry_safe(al, n, &as->source, node) {
		list_del_init(&al->node);
		disasm_line__free(disasm_line(al));
	}
}
1406
/*
 * Print one disassembly line to @fp: source/comment lines verbatim,
 * asm lines as "<offset> <mnemonic> <operands>".
 *
 * NOTE(review): when ops.raw is non-empty a '\n' is printed both inside
 * the branch and by the final fprintf, producing a blank line — confirm
 * whether that spacing is intentional.
 */
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->al.offset == -1)
		return fprintf(fp, "%s\n", dl->al.line);

	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);

	if (dl->ops.raw[0] != '\0') {
		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
				   dl->ops.raw);
	}

	return printed + fprintf(fp, "\n");
}
1423
1424size_t disasm__fprintf(struct list_head *head, FILE *fp)
1425{
1426 struct disasm_line *pos;
1427 size_t printed = 0;
1428
1429 list_for_each_entry(pos, head, al.node)
1430 printed += disasm_line__fprintf(pos, fp);
1431
1432 return printed;
1433}
1434
1435bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
1436{
1437 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
1438 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
1439 dl->ops.target.offset >= (s64)symbol__size(sym))
1440 return false;
1441
1442 return true;
1443}
1444
/*
 * Walk @sym's disassembly counting, for every line, how many local
 * jumps target it (jump_sources), and track the maximum for column
 * sizing.
 */
static void
annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
	struct annotation_line *al;

	/* PLT symbols contain external offsets */
	if (strstr(sym->name, "@plt"))
		return;

	list_for_each_entry(al, &notes->src->source, node) {
		struct disasm_line *dl;
		struct annotation_line *target;

		dl = disasm_line(al);

		if (!disasm_line__is_valid_local_jump(dl, sym))
			continue;

		target = annotated_source__get_line(notes->src,
						    dl->ops.target.offset);
		/*
		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
		 * have to adjust to the previous offset?
		 */
		if (target == NULL)
			continue;

		if (++target->jump_sources > notes->src->max_jump_sources)
			notes->src->max_jump_sources = target->jump_sources;
	}
}
1476
/*
 * Assign browser indexes to every line (idx over all lines, idx_asm
 * over asm-only lines) and record the longest line length for layout.
 */
static void annotation__set_index(struct annotation *notes)
{
	struct annotation_line *al;
	struct annotated_source *src = notes->src;

	src->widths.max_line_len = 0;
	src->nr_entries = 0;
	src->nr_asm_entries = 0;

	list_for_each_entry(al, &src->source, node) {
		size_t line_len = strlen(al->line);

		if (src->widths.max_line_len < line_len)
			src->widths.max_line_len = line_len;
		al->idx = src->nr_entries++;
		if (al->offset != -1)
			al->idx_asm = src->nr_asm_entries++;
		else
			al->idx_asm = -1;	/* not shown in asm-only view */
	}
}
1498
/* Column width needed to display @n jump sources. */
static inline int width_jumps(int n)
{
	if (n >= 100)
		return 5;

	return (n / 10) ? 2 : 1;
}
1507
1508static int annotation__max_ins_name(struct annotation *notes)
1509{
1510 int max_name = 0, len;
1511 struct annotation_line *al;
1512
1513 list_for_each_entry(al, ¬es->src->source, node) {
1514 if (al->offset == -1)
1515 continue;
1516
1517 len = strlen(disasm_line(al)->ins.name);
1518 if (max_name < len)
1519 max_name = len;
1520 }
1521
1522 return max_name;
1523}
1524
/*
 * Seed the address/target/jump column widths from the symbol's size and
 * end address, and the mnemonic width from the disassembly.
 */
static void
annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
	notes->src->widths.addr = notes->src->widths.target =
		notes->src->widths.min_addr = hex_width(symbol__size(sym));
	notes->src->widths.max_addr = hex_width(sym->end);
	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
}
1534
/*
 * Recompute the address/target column widths after an option toggle
 * (use_offset / full_addr / show_nr_jumps).
 */
void annotation__update_column_widths(struct annotation *notes)
{
	if (annotate_opts.use_offset)
		notes->src->widths.target = notes->src->widths.min_addr;
	else if (annotate_opts.full_addr)
		notes->src->widths.target = BITS_PER_LONG / 4;	/* full hex address */
	else
		notes->src->widths.target = notes->src->widths.max_addr;

	notes->src->widths.addr = notes->src->widths.target;

	if (annotate_opts.show_nr_jumps)
		notes->src->widths.addr += notes->src->widths.jumps + 1;
}
1549
/*
 * Flip between objdump-relative and full memory addresses for @notes,
 * updating the start address and the dependent column widths.
 */
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
{
	annotate_opts.full_addr = !annotate_opts.full_addr;

	if (annotate_opts.full_addr)
		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
	else
		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);

	annotation__update_column_widths(notes);
}
1561
1562static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
1563 struct rb_root *root)
1564{
1565 struct annotation_line *al;
1566 struct rb_root tmp_root = RB_ROOT;
1567
1568 list_for_each_entry(al, ¬es->src->source, node) {
1569 double percent_max = 0.0;
1570 u64 addr;
1571 int i;
1572
1573 for (i = 0; i < al->data_nr; i++) {
1574 double percent;
1575
1576 percent = annotation_data__percent(&al->data[i],
1577 annotate_opts.percent_type);
1578
1579 if (percent > percent_max)
1580 percent_max = percent;
1581 }
1582
1583 if (percent_max <= 0.5)
1584 continue;
1585
1586 addr = map__rip_2objdump(ms->map, ms->sym->start);
1587 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
1588 false, true, ms->sym->start + al->offset);
1589 insert_source_line(&tmp_root, al);
1590 }
1591
1592 resort_source_line(root, &tmp_root);
1593}
1594
/* Convenience wrapper: build the sorted source-line tree for @ms->sym. */
static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
{
	struct annotation *notes = symbol__annotation(ms->sym);

	annotation__calc_lines(notes, ms, root);
}
1601
/*
 * stdio2 (plain FILE, UTF-8 arrows) annotation for @ms->sym: annotate,
 * optionally print the sorted source-line summary, dump the listing
 * and free the disassembly.  Returns 0 or -1 on annotation failure.
 */
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
{
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	struct hists *hists = evsel__hists(evsel);
	char buf[1024];
	int err;

	err = symbol__annotate2(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	hists__scnprintf_title(hists, buf, sizeof(buf));
	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
		buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
	symbol__annotate_fprintf2(sym, stdout);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
1636
/*
 * Classic stdio annotation for @ms->sym: annotate, compute percentages,
 * optionally print the source-line summary, print the listing and free
 * the disassembly.  Returns 0 or -1 on annotation failure.
 */
int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
{
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	int err;

	err = symbol__annotate(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	symbol__calc_percent(sym, evsel);

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	symbol__annotate_printf(ms, evsel);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
1668
/* True when the TUI browser is active and symbol sorting is enabled. */
bool ui__has_annotation(void)
{
	return use_browser == 1 && perf_hpp_list.sym;
}
1673
1674
1675static double annotation_line__max_percent(struct annotation_line *al,
1676 unsigned int percent_type)
1677{
1678 double percent_max = 0.0;
1679 int i;
1680
1681 for (i = 0; i < al->data_nr; i++) {
1682 double percent;
1683
1684 percent = annotation_data__percent(&al->data[i],
1685 percent_type);
1686
1687 if (percent > percent_max)
1688 percent_max = percent;
1689 }
1690
1691 return percent_max;
1692}
1693
/*
 * Emit the graph prefix for @dl (jump/call/return arrow, or padding)
 * and then format the instruction itself into @bf.
 */
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			       void *obj, char *bf, size_t size,
			       void (*obj__printf)(void *obj, const char *fmt, ...),
			       void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
			bool fwd;

			/* Jumps leaving the function are drawn like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
call_like:
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}

	disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
			       notes->src->widths.max_ins_name);
}
1725
1726static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
1727{
1728 double ipc = 0.0, coverage = 0.0;
1729 struct annotated_branch *branch = annotation__get_branch(notes);
1730
1731 if (branch && branch->hit_cycles)
1732 ipc = branch->hit_insn / ((double)branch->hit_cycles);
1733
1734 if (branch && branch->total_insn) {
1735 coverage = branch->cover_insn * 100.0 /
1736 ((double)branch->total_insn);
1737 }
1738
1739 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
1740 ipc, coverage);
1741}
1742
/*
 * Build the branch-counter abbreviation legend ("X = event" lines plus
 * the '-'/'+' key) into a newly allocated string at *str.  When @header
 * is set each line is prefixed with '#' for perf-report style output.
 *
 * Returns 0 on success, -ENOTSUP when no branch counters exist, or
 * -ENOMEM on allocation failure.
 */
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
{
	struct evsel *pos;
	struct strbuf sb;

	if (evsel->evlist->nr_br_cntr <= 0)
		return -ENOTSUP;

	strbuf_init(&sb, /*hint=*/ 0);

	if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
		goto err;

	evlist__for_each_entry(evsel->evlist, pos) {
		if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
			continue;
		if (header && strbuf_addf(&sb, "#"))
			goto err;

		if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name))
			goto err;
	}

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, " '-' No event occurs\n"))
		goto err;

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n"))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}
1783
/* Assume the branch counter saturated at 3 */
#define ANNOTATION_BR_CNTR_SATURATION		3

/*
 * Render one line's branch-counter annotation into a newly allocated
 * string at *str.  Default output is a small histogram of abbr-name
 * characters per counter ("|AA+ |"); with -v the exact averaged counts
 * are printed instead ("A=2 ,B=-").  @br_cntr holds per-counter event
 * totals (with the saturation flag bit) aggregated over @num_aggr
 * samples.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int annotation_br_cntr_entry(char **str, int br_cntr_nr,
			     u64 *br_cntr, int num_aggr,
			     struct evsel *evsel)
{
	struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
	bool saturated = false;
	int i, j, avg, used;
	struct strbuf sb;

	strbuf_init(&sb, /*hint=*/ 0);
	for (i = 0; i < br_cntr_nr; i++) {
		used = 0;
		/* Per-sample average, rounded up, flag bit masked off. */
		avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
			   (double)num_aggr);

		/*
		 * A histogram with the abbr name is displayed by default.
		 * With -v, the exact number of branch counter is displayed.
		 */
		if (verbose) {
			/* Find the evsel owning branch counter index i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (strbuf_addstr(&sb, pos->abbr_name))
				goto err;

			if (!br_cntr[i]) {
				if (strbuf_addstr(&sb, "=-"))
					goto err;
			} else {
				if (strbuf_addf(&sb, "=%d", avg))
					goto err;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
				if (strbuf_addch(&sb, '+'))
					goto err;
			} else {
				if (strbuf_addch(&sb, ' '))
					goto err;
			}

			if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
				goto err;
			continue;
		}

		if (strbuf_addch(&sb, '|'))
			goto err;

		if (!br_cntr[i]) {
			if (strbuf_addch(&sb, '-'))
				goto err;
			used++;
		} else {
			/* Find the evsel owning branch counter index i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
				saturated = true;

			for (j = 0; j < avg; j++, used++) {
				/* Print + if the number of logged events > 3 */
				if (j >= ANNOTATION_BR_CNTR_SATURATION) {
					saturated = true;
					break;
				}
				if (strbuf_addstr(&sb, pos->abbr_name))
					goto err;
			}

			if (saturated) {
				if (strbuf_addch(&sb, '+'))
					goto err;
				used++;
			}
			pos = list_next_entry(pos, core.node);
		}

		/* Pad every counter's cell to the same width. */
		for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
			if (strbuf_addch(&sb, ' '))
				goto err;
		}
	}

	if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}
1885
/*
 * Render a single annotation line to @obj via the supplied callbacks, so the
 * same formatting logic can drive both the stdio and the TUI front ends.
 * The line is laid out as: per-event percent/period/samples columns, optional
 * branch/IPC/cycle columns, then either source text or a disassembled
 * instruction with its (possibly jump-target-annotated) address.
 */
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
	bool first_line, bool current_entry, bool change_color, int width,
	void *obj, unsigned int percent_type,
	int (*obj__set_color)(void *obj, int color),
	void (*obj__set_percent_color)(void *obj, double percent, bool current),
	int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
	void (*obj__printf)(void *obj, const char *fmt, ...),
	void (*obj__write_graph)(void *obj, int graph))

{
	double percent_max = annotation_line__max_percent(al, percent_type);
	int pcnt_width = annotation__pcnt_width(notes),
	    cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;

	/*
	 * On the first line that carries no samples (or no instruction
	 * offset) print column titles instead of values.
	 */
	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		/* One value column per event in this annotation line. */
		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i], percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%7" PRIu64 " ",
					    al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%7.2f ", percent);
			}
		}
	} else {
		/* No samples here: pad the column, or emit its title. */
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
				    symbol_conf.show_total_period ? "Period" :
				    symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}

	/* Branch-stack specific columns: IPC, cycles, branch counters. */
	if (notes->branch) {
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		if (!annotate_opts.show_minmax_cycle) {
			/* Plain average-cycles column. */
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					    ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			/* Wider "avg(min/max)" cycles column. */
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					  "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					  al->cycles->avg, al->cycles->min,
					  al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		if (annotate_opts.show_br_cntr) {
			if (show_title) {
				obj__printf(obj, "%*s ",
					    ANNOTATION__BR_CNTR_WIDTH,
					    "Branch Counter");
			} else {
				char *buf;

				/* buf is allocated by the callee; freed below. */
				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
							      al->num_aggr, al->evsel)) {
					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
					free(buf);
				}
			}
		}

		/* Empty source line in the title row: show IPC coverage. */
		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}

	obj__printf(obj, " ");

	if (!*al->line)
		/* Blank line: just pad to the full width. */
		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
	else if (al->offset == -1) {
		/* Source line (no instruction offset): optional line number. */
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ",
					    notes->src->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s ",
					    notes->src->widths.addr, " ");
		obj__printf(obj, bf);
		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
	} else {
		/* Disassembly line: format the address per offset_level. */
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->src->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				/* Jump target: optionally prefix with #incoming jumps. */
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->src->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->src->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s ",
						    notes->src->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);

		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
	}

}
2065
2066void annotation_line__write(struct annotation_line *al, struct annotation *notes,
2067 struct annotation_write_ops *wops)
2068{
2069 __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
2070 wops->change_color, wops->width, wops->obj,
2071 annotate_opts.percent_type,
2072 wops->set_color, wops->set_percent_color,
2073 wops->set_jumps_percent_color, wops->printf,
2074 wops->write_graph);
2075}
2076
/*
 * Annotate @ms->sym and additionally compute everything the TUI/stdio2
 * browsers need: per-line percentages, line indexes, jump targets, IPC and
 * column widths.  Returns 0 on success or a negative error from the
 * underlying annotation steps.
 */
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
		      struct arch **parch)
{
	struct symbol *sym = ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	int err;

	/* Disassemble the symbol and collect sample hits per line. */
	err = symbol__annotate(ms, evsel, parch);
	if (err)
		return err;

	symbol__calc_percent(sym, evsel);

	annotation__set_index(notes);
	annotation__mark_jump_targets(notes, sym);

	err = annotation__compute_ipc(notes, size, evsel);
	if (err)
		return err;

	annotation__init_column_widths(notes, sym);
	annotation__update_column_widths(notes);
	/* Mark that the extended (v2) annotation data is ready. */
	sym->annotate2 = 1;

	return 0;
}
2104
/* Human-readable names for enum perf_disassembler, indexed by enum value. */
const char * const perf_disassembler__strs[] = {
	[PERF_DISASM_UNKNOWN]  = "unknown",
	[PERF_DISASM_LLVM]     = "llvm",
	[PERF_DISASM_CAPSTONE] = "capstone",
	[PERF_DISASM_OBJDUMP]  = "objdump",
};
2111
2112
2113static void annotation_options__add_disassembler(struct annotation_options *options,
2114 enum perf_disassembler dis)
2115{
2116 for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
2117 if (options->disassemblers[i] == dis) {
2118 /* Disassembler is already present then don't add again. */
2119 return;
2120 }
2121 if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
2122 /* Found a free slot. */
2123 options->disassemblers[i] = dis;
2124 return;
2125 }
2126 }
2127 pr_err("Failed to add disassembler %d\n", dis);
2128}
2129
2130static int annotation_options__add_disassemblers_str(struct annotation_options *options,
2131 const char *str)
2132{
2133 while (str && *str != '\0') {
2134 const char *comma = strchr(str, ',');
2135 int len = comma ? comma - str : (int)strlen(str);
2136 bool match = false;
2137
2138 for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
2139 const char *dis_str = perf_disassembler__strs[i];
2140
2141 if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
2142 annotation_options__add_disassembler(options, i);
2143 match = true;
2144 break;
2145 }
2146 }
2147 if (!match) {
2148 pr_err("Invalid disassembler '%.*s'\n", len, str);
2149 return -1;
2150 }
2151 str = comma ? comma + 1 : NULL;
2152 }
2153 return 0;
2154}
2155
/*
 * perf_config() callback: apply one "annotate.*" config variable to the
 * annotation options in @data (and a few shared symbol_conf fields).
 * Non-"annotate." variables and unknown keys are ignored; returns 0 on
 * success, -1 on allocation failure, or the parser's error code.
 */
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		/* Clamp to the supported [MIN, MAX] offset-level range. */
		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.disassemblers")) {
		int err = annotation_options__add_disassemblers_str(opt, value);

		if (err)
			return err;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		/* These two live in the global symbol_conf, not in @opt. */
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
								value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}
2219
2220void annotation_options__init(void)
2221{
2222 struct annotation_options *opt = &annotate_opts;
2223
2224 memset(opt, 0, sizeof(*opt));
2225
2226 /* Default values. */
2227 opt->use_offset = true;
2228 opt->jump_arrows = true;
2229 opt->annotate_src = true;
2230 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
2231 opt->percent_type = PERCENT_PERIOD_LOCAL;
2232}
2233
/* Release the heap-allocated strings owned by the global annotation options. */
void annotation_options__exit(void)
{
	zfree(&annotate_opts.disassembler_style);
	zfree(&annotate_opts.objdump_path);
}
2239
/*
 * Populate the disassembler preference list with built-in defaults, in order
 * of preference (LLVM, then capstone, then objdump) depending on what this
 * build supports.  A user-configured list (first slot already set) is kept.
 */
static void annotation_options__default_init_disassemblers(struct annotation_options *options)
{
	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
		/* Already initialized. */
		return;
	}
#ifdef HAVE_LIBLLVM_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
#endif
	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
}
2254
/*
 * Read "annotate.*" settings from the perf config file(s) into the global
 * options, then fill in default disassemblers if none were configured.
 */
void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
	annotation_options__default_init_disassemblers(&annotate_opts);
}
2260
2261static unsigned int parse_percent_type(char *str1, char *str2)
2262{
2263 unsigned int type = (unsigned int) -1;
2264
2265 if (!strcmp("period", str1)) {
2266 if (!strcmp("local", str2))
2267 type = PERCENT_PERIOD_LOCAL;
2268 else if (!strcmp("global", str2))
2269 type = PERCENT_PERIOD_GLOBAL;
2270 }
2271
2272 if (!strcmp("hits", str1)) {
2273 if (!strcmp("local", str2))
2274 type = PERCENT_HITS_LOCAL;
2275 else if (!strcmp("global", str2))
2276 type = PERCENT_HITS_GLOBAL;
2277 }
2278
2279 return type;
2280}
2281
2282int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
2283 int unset __maybe_unused)
2284{
2285 unsigned int type;
2286 char *str1, *str2;
2287 int err = -1;
2288
2289 str1 = strdup(_str);
2290 if (!str1)
2291 return -ENOMEM;
2292
2293 str2 = strchr(str1, '-');
2294 if (!str2)
2295 goto out;
2296
2297 *str2++ = 0;
2298
2299 type = parse_percent_type(str1, str2);
2300 if (type == (unsigned int) -1)
2301 type = parse_percent_type(str2, str1);
2302 if (type != (unsigned int) -1) {
2303 annotate_opts.percent_type = type;
2304 err = 0;
2305 }
2306
2307out:
2308 free(str1);
2309 return err;
2310}
2311
2312int annotate_check_args(void)
2313{
2314 struct annotation_options *args = &annotate_opts;
2315
2316 if (args->prefix_strip && !args->prefix) {
2317 pr_err("--prefix-strip requires --prefix\n");
2318 return -1;
2319 }
2320 return 0;
2321}
2322
2323/*
2324 * Get register number and access offset from the given instruction.
2325 * It assumes AT&T x86 asm format like OFFSET(REG). Maybe it needs
2326 * to revisit the format when it handles different architecture.
2327 * Fills @reg and @offset when return 0.
2328 */
2329static int extract_reg_offset(struct arch *arch, const char *str,
2330 struct annotated_op_loc *op_loc)
2331{
2332 char *p;
2333 char *regname;
2334
2335 if (arch->objdump.register_char == 0)
2336 return -1;
2337
2338 /*
2339 * It should start from offset, but it's possible to skip 0
2340 * in the asm. So 0(%rax) should be same as (%rax).
2341 *
2342 * However, it also start with a segment select register like
2343 * %gs:0x18(%rbx). In that case it should skip the part.
2344 */
2345 if (*str == arch->objdump.register_char) {
2346 if (arch__is(arch, "x86")) {
2347 /* FIXME: Handle other segment registers */
2348 if (!strncmp(str, "%gs:", 4))
2349 op_loc->segment = INSN_SEG_X86_GS;
2350 }
2351
2352 while (*str && !isdigit(*str) &&
2353 *str != arch->objdump.memory_ref_char)
2354 str++;
2355 }
2356
2357 op_loc->offset = strtol(str, &p, 0);
2358
2359 p = strchr(p, arch->objdump.register_char);
2360 if (p == NULL)
2361 return -1;
2362
2363 regname = strdup(p);
2364 if (regname == NULL)
2365 return -1;
2366
2367 op_loc->reg1 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2368 free(regname);
2369
2370 /* Get the second register */
2371 if (op_loc->multi_regs) {
2372 p = strchr(p + 1, arch->objdump.register_char);
2373 if (p == NULL)
2374 return -1;
2375
2376 regname = strdup(p);
2377 if (regname == NULL)
2378 return -1;
2379
2380 op_loc->reg2 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
2381 free(regname);
2382 }
2383 return 0;
2384}
2385
2386/**
2387 * annotate_get_insn_location - Get location of instruction
2388 * @arch: the architecture info
2389 * @dl: the target instruction
2390 * @loc: a buffer to save the data
2391 *
2392 * Get detailed location info (register and offset) in the instruction.
2393 * It needs both source and target operand and whether it accesses a
2394 * memory location. The offset field is meaningful only when the
2395 * corresponding mem flag is set. The reg2 field is meaningful only
2396 * when multi_regs flag is set.
2397 *
2398 * Some examples on x86:
2399 *
2400 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
2401 * # dst_reg1 = rcx, dst_mem = 0
2402 *
2403 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
2404 * # dst_reg1 = r8, dst_mem = 0
2405 *
2406 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
2407 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
2408 * # dst_multi_regs = 1, dst_offset = 8
2409 */
2410int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
2411 struct annotated_insn_loc *loc)
2412{
2413 struct ins_operands *ops;
2414 struct annotated_op_loc *op_loc;
2415 int i;
2416
2417 if (ins__is_lock(&dl->ins))
2418 ops = dl->ops.locked.ops;
2419 else
2420 ops = &dl->ops;
2421
2422 if (ops == NULL)
2423 return -1;
2424
2425 memset(loc, 0, sizeof(*loc));
2426
2427 for_each_insn_op_loc(loc, i, op_loc) {
2428 const char *insn_str = ops->source.raw;
2429 bool multi_regs = ops->source.multi_regs;
2430 bool mem_ref = ops->source.mem_ref;
2431
2432 if (i == INSN_OP_TARGET) {
2433 insn_str = ops->target.raw;
2434 multi_regs = ops->target.multi_regs;
2435 mem_ref = ops->target.mem_ref;
2436 }
2437
2438 /* Invalidate the register by default */
2439 op_loc->reg1 = -1;
2440 op_loc->reg2 = -1;
2441
2442 if (insn_str == NULL) {
2443 if (!arch__is(arch, "powerpc"))
2444 continue;
2445 }
2446
2447 /*
2448 * For powerpc, call get_powerpc_regs function which extracts the
2449 * required fields for op_loc, ie reg1, reg2, offset from the
2450 * raw instruction.
2451 */
2452 if (arch__is(arch, "powerpc")) {
2453 op_loc->mem_ref = mem_ref;
2454 op_loc->multi_regs = multi_regs;
2455 get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
2456 } else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
2457 op_loc->mem_ref = true;
2458 op_loc->multi_regs = multi_regs;
2459 extract_reg_offset(arch, insn_str, op_loc);
2460 } else {
2461 char *s, *p = NULL;
2462
2463 if (arch__is(arch, "x86")) {
2464 /* FIXME: Handle other segment registers */
2465 if (!strncmp(insn_str, "%gs:", 4)) {
2466 op_loc->segment = INSN_SEG_X86_GS;
2467 op_loc->offset = strtol(insn_str + 4,
2468 &p, 0);
2469 if (p && p != insn_str + 4)
2470 op_loc->imm = true;
2471 continue;
2472 }
2473 }
2474
2475 s = strdup(insn_str);
2476 if (s == NULL)
2477 return -1;
2478
2479 if (*s == arch->objdump.register_char)
2480 op_loc->reg1 = get_dwarf_regnum(s, arch->e_machine, arch->e_flags);
2481 else if (*s == arch->objdump.imm_char) {
2482 op_loc->offset = strtol(s + 1, &p, 0);
2483 if (p && p != s + 1)
2484 op_loc->imm = true;
2485 }
2486 free(s);
2487 }
2488 }
2489
2490 return 0;
2491}
2492
/*
 * Find the disasm_line of @sym covering address @ip.  When @allow_update is
 * set and the matching line is a bare "lock" prefix (as llvm-objdump emits
 * it on its own line), skip forward to the actual instruction.  Returns
 * NULL when @ip has no disassembly line.
 */
static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
					    bool allow_update)
{
	struct disasm_line *dl;
	struct annotation *notes;

	notes = symbol__annotation(sym);

	list_for_each_entry(dl, &notes->src->source, al.node) {
		/* Skip source/comment lines without an instruction offset. */
		if (dl->al.offset == -1)
			continue;

		if (sym->start + dl->al.offset == ip) {
			/*
			 * llvm-objdump places "lock" in a separate line and
			 * in that case, we want to get the next line.
			 */
			if (ins__is_lock(&dl->ins) &&
			    *dl->ops.raw == '\0' && allow_update) {
				/* Move the lookup target to the next line. */
				ip++;
				continue;
			}
			return dl;
		}
	}
	return NULL;
}
2520
2521static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
2522 const char *name)
2523{
2524 struct annotated_item_stat *istat;
2525
2526 list_for_each_entry(istat, head, list) {
2527 if (!strcmp(istat->name, name))
2528 return istat;
2529 }
2530
2531 istat = zalloc(sizeof(*istat));
2532 if (istat == NULL)
2533 return NULL;
2534
2535 istat->name = strdup(name);
2536 if ((istat->name == NULL) || (!strlen(istat->name))) {
2537 free(istat);
2538 return NULL;
2539 }
2540
2541 list_add_tail(&istat->list, head);
2542 return istat;
2543}
2544
2545static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
2546{
2547 if (arch__is(arch, "x86")) {
2548 if (!strncmp(dl->ins.name, "push", 4) ||
2549 !strncmp(dl->ins.name, "pop", 3) ||
2550 !strncmp(dl->ins.name, "call", 4) ||
2551 !strncmp(dl->ins.name, "ret", 3))
2552 return true;
2553 }
2554
2555 return false;
2556}
2557
2558static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
2559{
2560 /* On x86_64, %gs:40 is used for stack canary */
2561 if (arch__is(arch, "x86")) {
2562 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
2563 loc->offset == 40)
2564 return true;
2565 }
2566
2567 return false;
2568}
2569
/*
 * Walk backwards from @curr to the previous real instruction line, skipping
 * source/comment lines (offset == -1).  Returns NULL when @curr is the
 * first entry or only non-instruction lines precede it.
 */
static struct disasm_line *
annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
{
	struct list_head *sources = &notes->src->source;
	struct disasm_line *prev;

	if (curr == list_first_entry(sources, struct disasm_line, al.node))
		return NULL;

	prev = list_prev_entry(curr, al.node);
	/* Skip lines without an instruction offset. */
	while (prev->al.offset == -1 &&
	       prev != list_first_entry(sources, struct disasm_line, al.node))
		prev = list_prev_entry(prev, al.node);

	/* The list head itself may be a non-instruction line. */
	if (prev->al.offset == -1)
		return NULL;

	return prev;
}
2589
/*
 * Walk forwards from @curr to the next real instruction line, skipping
 * source/comment lines (offset == -1).  Returns NULL when @curr is the
 * last entry or only non-instruction lines follow it.
 */
static struct disasm_line *
annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
{
	struct list_head *sources = &notes->src->source;
	struct disasm_line *next;

	if (curr == list_last_entry(sources, struct disasm_line, al.node))
		return NULL;

	next = list_next_entry(curr, al.node);
	/* Skip lines without an instruction offset. */
	while (next->al.offset == -1 &&
	       next != list_last_entry(sources, struct disasm_line, al.node))
		next = list_next_entry(next, al.node);

	/* The list tail itself may be a non-instruction line. */
	if (next->al.offset == -1)
		return NULL;

	return next;
}
2609
2610u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
2611 struct disasm_line *dl)
2612{
2613 struct annotation *notes;
2614 struct disasm_line *next;
2615 u64 addr;
2616
2617 notes = symbol__annotation(ms->sym);
2618 /*
2619 * PC-relative addressing starts from the next instruction address
2620 * But the IP is for the current instruction. Since disasm_line
2621 * doesn't have the instruction size, calculate it using the next
2622 * disasm_line. If it's the last one, we can use symbol's end
2623 * address directly.
2624 */
2625 next = annotation__next_asm_line(notes, dl);
2626 if (next == NULL)
2627 addr = ms->sym->end + offset;
2628 else
2629 addr = ip + (next->al.offset - dl->al.offset) + offset;
2630
2631 return map__rip_2objdump(ms->map, addr);
2632}
2633
/*
 * Single-entry cache pairing a DSO with its opened debuginfo, so repeated
 * data-type lookups on the same DSO reuse one debuginfo handle.
 */
static struct debuginfo_cache {
	struct dso *dso;	/* owned reference; NULL when empty */
	struct debuginfo *dbg;	/* debuginfo for @dso; may be NULL */
} di_cache;

/* Drop the cached DSO reference and its debuginfo handle. */
void debuginfo_cache__delete(void)
{
	dso__put(di_cache.dso);
	di_cache.dso = NULL;

	debuginfo__delete(di_cache.dbg);
	di_cache.dbg = NULL;
}
2647
2648/**
2649 * hist_entry__get_data_type - find data type for given hist entry
2650 * @he: hist entry
2651 *
2652 * This function first annotates the instruction at @he->ip and extracts
2653 * register and offset info from it. Then it searches the DWARF debug
2654 * info to get a variable and type information using the address, register,
2655 * and offset.
2656 */
2657struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
2658{
2659 struct map_symbol *ms = &he->ms;
2660 struct evsel *evsel = hists_to_evsel(he->hists);
2661 struct arch *arch;
2662 struct disasm_line *dl;
2663 struct annotated_insn_loc loc;
2664 struct annotated_op_loc *op_loc;
2665 struct annotated_data_type *mem_type;
2666 struct annotated_item_stat *istat;
2667 u64 ip = he->ip;
2668 int i;
2669
2670 ann_data_stat.total++;
2671
2672 if (ms->map == NULL || ms->sym == NULL) {
2673 ann_data_stat.no_sym++;
2674 return NULL;
2675 }
2676
2677 if (!symbol_conf.init_annotation) {
2678 ann_data_stat.no_sym++;
2679 return NULL;
2680 }
2681
2682 /*
2683 * di_cache holds a pair of values, but code below assumes
2684 * di_cache.dso can be compared/updated and di_cache.dbg can be
2685 * read/updated independently from each other. That assumption only
2686 * holds in single threaded code.
2687 */
2688 assert(perf_singlethreaded);
2689
2690 if (map__dso(ms->map) != di_cache.dso) {
2691 dso__put(di_cache.dso);
2692 di_cache.dso = dso__get(map__dso(ms->map));
2693
2694 debuginfo__delete(di_cache.dbg);
2695 di_cache.dbg = debuginfo__new(dso__long_name(di_cache.dso));
2696 }
2697
2698 if (di_cache.dbg == NULL) {
2699 ann_data_stat.no_dbginfo++;
2700 return NULL;
2701 }
2702
2703 /* Make sure it has the disasm of the function */
2704 if (symbol__annotate(ms, evsel, &arch) < 0) {
2705 ann_data_stat.no_insn++;
2706 return NULL;
2707 }
2708
2709 /*
2710 * Get a disasm to extract the location from the insn.
2711 * This is too slow...
2712 */
2713 dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
2714 if (dl == NULL) {
2715 ann_data_stat.no_insn++;
2716 return NULL;
2717 }
2718
2719retry:
2720 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
2721 if (istat == NULL) {
2722 ann_data_stat.no_insn++;
2723 return NULL;
2724 }
2725
2726 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
2727 ann_data_stat.no_insn_ops++;
2728 istat->bad++;
2729 return NULL;
2730 }
2731
2732 if (is_stack_operation(arch, dl)) {
2733 istat->good++;
2734 he->mem_type_off = 0;
2735 return &stackop_type;
2736 }
2737
2738 for_each_insn_op_loc(&loc, i, op_loc) {
2739 struct data_loc_info dloc = {
2740 .arch = arch,
2741 .thread = he->thread,
2742 .ms = ms,
2743 /* Recalculate IP for LOCK prefix or insn fusion */
2744 .ip = ms->sym->start + dl->al.offset,
2745 .cpumode = he->cpumode,
2746 .op = op_loc,
2747 .di = di_cache.dbg,
2748 };
2749
2750 if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
2751 continue;
2752
2753 /* Recalculate IP because of LOCK prefix or insn fusion */
2754 ip = ms->sym->start + dl->al.offset;
2755
2756 /* PC-relative addressing */
2757 if (op_loc->reg1 == DWARF_REG_PC) {
2758 dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
2759 op_loc->offset, dl);
2760 }
2761
2762 /* This CPU access in kernel - pretend PC-relative addressing */
2763 if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
2764 op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
2765 dloc.var_addr = op_loc->offset;
2766 op_loc->reg1 = DWARF_REG_PC;
2767 }
2768
2769 mem_type = find_data_type(&dloc);
2770
2771 if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
2772 istat->good++;
2773 he->mem_type_off = 0;
2774 return &canary_type;
2775 }
2776
2777 if (mem_type)
2778 istat->good++;
2779 else
2780 istat->bad++;
2781
2782 if (symbol_conf.annotate_data_sample) {
2783 annotated_data_type__update_samples(mem_type, evsel,
2784 dloc.type_offset,
2785 he->stat.nr_events,
2786 he->stat.period);
2787 }
2788 he->mem_type_off = dloc.type_offset;
2789 return mem_type;
2790 }
2791
2792 /*
2793 * Some instructions can be fused and the actual memory access came
2794 * from the previous instruction.
2795 */
2796 if (dl->al.offset > 0) {
2797 struct annotation *notes;
2798 struct disasm_line *prev_dl;
2799
2800 notes = symbol__annotation(ms->sym);
2801 prev_dl = annotation__prev_asm_line(notes, dl);
2802
2803 if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
2804 dl = prev_dl;
2805 goto retry;
2806 }
2807 }
2808
2809 ann_data_stat.no_mem_ops++;
2810 istat->bad++;
2811 return NULL;
2812}
2813
/* Basic block traversal (BFS) data structure */
struct basic_block_data {
	struct list_head queue;		/* blocks waiting to be expanded */
	struct list_head visited;	/* blocks already processed */
};

/*
 * During the traversal, it needs to know the parent block where the current
 * block started from. Note that a single basic block can be parent of
 * two child basic blocks (in case of condition jump).
 */
struct basic_block_link {
	struct list_head node;			/* position on queue/visited */
	struct basic_block_link *parent;	/* block we were reached from */
	struct annotated_basic_block *bb;	/* the block itself */
};
2830
2831/* Check any of basic block in the list already has the offset */
2832static bool basic_block_has_offset(struct list_head *head, s64 offset)
2833{
2834 struct basic_block_link *link;
2835
2836 list_for_each_entry(link, head, node) {
2837 s64 begin_offset = link->bb->begin->al.offset;
2838 s64 end_offset = link->bb->end->al.offset;
2839
2840 if (begin_offset <= offset && offset <= end_offset)
2841 return true;
2842 }
2843 return false;
2844}
2845
2846static bool is_new_basic_block(struct basic_block_data *bb_data,
2847 struct disasm_line *dl)
2848{
2849 s64 offset = dl->al.offset;
2850
2851 if (basic_block_has_offset(&bb_data->visited, offset))
2852 return false;
2853 if (basic_block_has_offset(&bb_data->queue, offset))
2854 return false;
2855 return true;
2856}
2857
2858/* Add a basic block starting from dl and link it to the parent */
2859static int add_basic_block(struct basic_block_data *bb_data,
2860 struct basic_block_link *parent,
2861 struct disasm_line *dl)
2862{
2863 struct annotated_basic_block *bb;
2864 struct basic_block_link *link;
2865
2866 if (dl == NULL)
2867 return -1;
2868
2869 if (!is_new_basic_block(bb_data, dl))
2870 return 0;
2871
2872 bb = zalloc(sizeof(*bb));
2873 if (bb == NULL)
2874 return -1;
2875
2876 bb->begin = dl;
2877 bb->end = dl;
2878 INIT_LIST_HEAD(&bb->list);
2879
2880 link = malloc(sizeof(*link));
2881 if (link == NULL) {
2882 free(bb);
2883 return -1;
2884 }
2885
2886 link->bb = bb;
2887 link->parent = parent;
2888 list_add_tail(&link->node, &bb_data->queue);
2889 return 0;
2890}
2891
/* Returns true when it finds the target in the current basic block */
static bool process_basic_block(struct basic_block_data *bb_data,
				struct basic_block_link *link,
				struct symbol *sym, u64 target)
{
	struct disasm_line *dl, *next_dl, *last_dl;
	struct annotation *notes = symbol__annotation(sym);
	bool found = false;

	dl = link->bb->begin;
	/* Check if it's already visited */
	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
		return false;

	/* Find the last real instruction line of the function. */
	last_dl = list_last_entry(&notes->src->source,
				  struct disasm_line, al.node);
	if (last_dl->al.offset == -1)
		last_dl = annotation__prev_asm_line(notes, last_dl);

	if (last_dl == NULL)
		return false;

	/* Extend the block from its begin line until something ends it. */
	list_for_each_entry_from(dl, &notes->src->source, al.node) {
		/* Skip comment or debug info line */
		if (dl->al.offset == -1)
			continue;
		/* Found the target instruction */
		if (sym->start + dl->al.offset == target) {
			found = true;
			break;
		}
		/* End of the function, finish the block */
		if (dl == last_dl)
			break;
		/* 'return' instruction finishes the block */
		if (ins__is_ret(&dl->ins))
			break;
		/* normal instructions are part of the basic block */
		if (!ins__is_jump(&dl->ins))
			continue;
		/* jump to a different function, tail call or return */
		if (dl->ops.target.outside)
			break;
		/* jump instruction creates new basic block(s) */
		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
					   /*allow_update=*/false);
		if (next_dl)
			add_basic_block(bb_data, link, next_dl);

		/*
		 * FIXME: determine conditional jumps properly.
		 * Conditional jumps create another basic block with the
		 * next disasm line.
		 */
		if (!strstr(dl->ins.name, "jmp")) {
			next_dl = annotation__next_asm_line(notes, dl);
			if (next_dl)
				add_basic_block(bb_data, link, next_dl);
		}
		break;

	}
	/* Record where this block ended (target, jump, ret or last line). */
	link->bb->end = dl;
	return found;
}
2957
2958/*
2959 * It founds a target basic block, build a proper linked list of basic blocks
2960 * by following the link recursively.
2961 */
2962static void link_found_basic_blocks(struct basic_block_link *link,
2963 struct list_head *head)
2964{
2965 while (link) {
2966 struct basic_block_link *parent = link->parent;
2967
2968 list_move(&link->bb->list, head);
2969 list_del(&link->node);
2970 free(link);
2971
2972 link = parent;
2973 }
2974}
2975
2976static void delete_basic_blocks(struct basic_block_data *bb_data)
2977{
2978 struct basic_block_link *link, *tmp;
2979
2980 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
2981 list_del(&link->node);
2982 zfree(&link->bb);
2983 free(link);
2984 }
2985
2986 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
2987 list_del(&link->node);
2988 zfree(&link->bb);
2989 free(link);
2990 }
2991}
2992
2993/**
2994 * annotate_get_basic_blocks - Get basic blocks for given address range
2995 * @sym: symbol to annotate
2996 * @src: source address
2997 * @dst: destination address
2998 * @head: list head to save basic blocks
2999 *
3000 * This function traverses disasm_lines from @src to @dst and save them in a
3001 * list of annotated_basic_block to @head. It uses BFS to find the shortest
3002 * path between two. The basic_block_link is to maintain parent links so
3003 * that it can build a list of blocks from the start.
3004 */
3005int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
3006 struct list_head *head)
3007{
3008 struct basic_block_data bb_data = {
3009 .queue = LIST_HEAD_INIT(bb_data.queue),
3010 .visited = LIST_HEAD_INIT(bb_data.visited),
3011 };
3012 struct basic_block_link *link;
3013 struct disasm_line *dl;
3014 int ret = -1;
3015
3016 dl = find_disasm_line(sym, src, /*allow_update=*/false);
3017 if (dl == NULL)
3018 return -1;
3019
3020 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
3021 return -1;
3022
3023 /* Find shortest path from src to dst using BFS */
3024 while (!list_empty(&bb_data.queue)) {
3025 link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
3026
3027 if (process_basic_block(&bb_data, link, sym, dst)) {
3028 link_found_basic_blocks(link, head);
3029 ret = 0;
3030 break;
3031 }
3032 list_move(&link->node, &bb_data.visited);
3033 }
3034 delete_basic_blocks(&bb_data);
3035 return ret;
3036}