Loading...
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <inttypes.h>
4#include <regex.h>
5#include <stdlib.h>
6#include <linux/mman.h>
7#include <linux/time64.h>
8#include "debug.h"
9#include "dso.h"
10#include "sort.h"
11#include "hist.h"
12#include "cacheline.h"
13#include "comm.h"
14#include "map.h"
15#include "maps.h"
16#include "symbol.h"
17#include "map_symbol.h"
18#include "branch.h"
19#include "thread.h"
20#include "evsel.h"
21#include "evlist.h"
22#include "srcline.h"
23#include "strlist.h"
24#include "strbuf.h"
25#include "mem-events.h"
26#include "annotate.h"
27#include "event.h"
28#include "time-utils.h"
29#include "cgroup.h"
30#include "machine.h"
31#include <linux/kernel.h>
32#include <linux/string.h>
33
34#ifdef HAVE_LIBTRACEEVENT
35#include <traceevent/event-parse.h>
36#endif
37
38regex_t parent_regex;
39const char default_parent_pattern[] = "^sys_|^do_page_fault";
40const char *parent_pattern = default_parent_pattern;
41const char *default_sort_order = "comm,dso,symbol";
42const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
43const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
44const char default_top_sort_order[] = "dso,symbol";
45const char default_diff_sort_order[] = "dso,symbol";
46const char default_tracepoint_sort_order[] = "trace";
47const char *sort_order;
48const char *field_order;
49regex_t ignore_callees_regex;
50int have_ignore_callees = 0;
51enum sort_mode sort__mode = SORT_MODE__NORMAL;
52static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
53static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
54
55/*
56 * Replaces all occurrences of a char used with the:
57 *
58 * -t, --field-separator
59 *
60 * option, that uses a special separator character and don't pad with spaces,
61 * replacing all occurrences of this separator in symbol names (and other
62 * output) with a '.' character, that thus it's the only non valid separator.
63*/
64static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
65{
66 int n;
67 va_list ap;
68
69 va_start(ap, fmt);
70 n = vsnprintf(bf, size, fmt, ap);
71 if (symbol_conf.field_sep && n > 0) {
72 char *sep = bf;
73
74 while (1) {
75 sep = strchr(sep, *symbol_conf.field_sep);
76 if (sep == NULL)
77 break;
78 *sep = '.';
79 }
80 }
81 va_end(ap);
82
83 if (n >= (int)size)
84 return size - 1;
85 return n;
86}
87
88static int64_t cmp_null(const void *l, const void *r)
89{
90 if (!l && !r)
91 return 0;
92 else if (!l)
93 return -1;
94 else
95 return 1;
96}
97
98/* --sort pid */
99
100static int64_t
101sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
102{
103 return right->thread->tid - left->thread->tid;
104}
105
106static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
107 size_t size, unsigned int width)
108{
109 const char *comm = thread__comm_str(he->thread);
110
111 width = max(7U, width) - 8;
112 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
113 width, width, comm ?: "");
114}
115
116static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
117{
118 const struct thread *th = arg;
119
120 if (type != HIST_FILTER__THREAD)
121 return -1;
122
123 return th && he->thread != th;
124}
125
126struct sort_entry sort_thread = {
127 .se_header = " Pid:Command",
128 .se_cmp = sort__thread_cmp,
129 .se_snprintf = hist_entry__thread_snprintf,
130 .se_filter = hist_entry__thread_filter,
131 .se_width_idx = HISTC_THREAD,
132};
133
134/* --sort comm */
135
136/*
137 * We can't use pointer comparison in functions below,
138 * because it gives different results based on pointer
139 * values, which could break some sorting assumptions.
140 */
141static int64_t
142sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
143{
144 return strcmp(comm__str(right->comm), comm__str(left->comm));
145}
146
147static int64_t
148sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
149{
150 return strcmp(comm__str(right->comm), comm__str(left->comm));
151}
152
153static int64_t
154sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
155{
156 return strcmp(comm__str(right->comm), comm__str(left->comm));
157}
158
159static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
160 size_t size, unsigned int width)
161{
162 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
163}
164
165struct sort_entry sort_comm = {
166 .se_header = "Command",
167 .se_cmp = sort__comm_cmp,
168 .se_collapse = sort__comm_collapse,
169 .se_sort = sort__comm_sort,
170 .se_snprintf = hist_entry__comm_snprintf,
171 .se_filter = hist_entry__thread_filter,
172 .se_width_idx = HISTC_COMM,
173};
174
175/* --sort dso */
176
177static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
178{
179 struct dso *dso_l = map_l ? map_l->dso : NULL;
180 struct dso *dso_r = map_r ? map_r->dso : NULL;
181 const char *dso_name_l, *dso_name_r;
182
183 if (!dso_l || !dso_r)
184 return cmp_null(dso_r, dso_l);
185
186 if (verbose > 0) {
187 dso_name_l = dso_l->long_name;
188 dso_name_r = dso_r->long_name;
189 } else {
190 dso_name_l = dso_l->short_name;
191 dso_name_r = dso_r->short_name;
192 }
193
194 return strcmp(dso_name_l, dso_name_r);
195}
196
197static int64_t
198sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
199{
200 return _sort__dso_cmp(right->ms.map, left->ms.map);
201}
202
203static int _hist_entry__dso_snprintf(struct map *map, char *bf,
204 size_t size, unsigned int width)
205{
206 if (map && map->dso) {
207 const char *dso_name = verbose > 0 ? map->dso->long_name :
208 map->dso->short_name;
209 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
210 }
211
212 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
213}
214
215static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
216 size_t size, unsigned int width)
217{
218 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
219}
220
221static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
222{
223 const struct dso *dso = arg;
224
225 if (type != HIST_FILTER__DSO)
226 return -1;
227
228 return dso && (!he->ms.map || he->ms.map->dso != dso);
229}
230
231struct sort_entry sort_dso = {
232 .se_header = "Shared Object",
233 .se_cmp = sort__dso_cmp,
234 .se_snprintf = hist_entry__dso_snprintf,
235 .se_filter = hist_entry__dso_filter,
236 .se_width_idx = HISTC_DSO,
237};
238
239/* --sort symbol */
240
241static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
242{
243 return (int64_t)(right_ip - left_ip);
244}
245
246int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
247{
248 if (!sym_l || !sym_r)
249 return cmp_null(sym_l, sym_r);
250
251 if (sym_l == sym_r)
252 return 0;
253
254 if (sym_l->inlined || sym_r->inlined) {
255 int ret = strcmp(sym_l->name, sym_r->name);
256
257 if (ret)
258 return ret;
259 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
260 return 0;
261 }
262
263 if (sym_l->start != sym_r->start)
264 return (int64_t)(sym_r->start - sym_l->start);
265
266 return (int64_t)(sym_r->end - sym_l->end);
267}
268
269static int64_t
270sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
271{
272 int64_t ret;
273
274 if (!left->ms.sym && !right->ms.sym)
275 return _sort__addr_cmp(left->ip, right->ip);
276
277 /*
278 * comparing symbol address alone is not enough since it's a
279 * relative address within a dso.
280 */
281 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
282 ret = sort__dso_cmp(left, right);
283 if (ret != 0)
284 return ret;
285 }
286
287 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
288}
289
290static int64_t
291sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
292{
293 if (!left->ms.sym || !right->ms.sym)
294 return cmp_null(left->ms.sym, right->ms.sym);
295
296 return strcmp(right->ms.sym->name, left->ms.sym->name);
297}
298
299static int _hist_entry__sym_snprintf(struct map_symbol *ms,
300 u64 ip, char level, char *bf, size_t size,
301 unsigned int width)
302{
303 struct symbol *sym = ms->sym;
304 struct map *map = ms->map;
305 size_t ret = 0;
306
307 if (verbose > 0) {
308 char o = map ? dso__symtab_origin(map->dso) : '!';
309 u64 rip = ip;
310
311 if (map && map->dso && map->dso->kernel
312 && map->dso->adjust_symbols)
313 rip = map->unmap_ip(map, ip);
314
315 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
316 BITS_PER_LONG / 4 + 2, rip, o);
317 }
318
319 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
320 if (sym && map) {
321 if (sym->type == STT_OBJECT) {
322 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
323 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
324 ip - map->unmap_ip(map, sym->start));
325 } else {
326 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
327 width - ret,
328 sym->name);
329 if (sym->inlined)
330 ret += repsep_snprintf(bf + ret, size - ret,
331 " (inlined)");
332 }
333 } else {
334 size_t len = BITS_PER_LONG / 4;
335 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
336 len, ip);
337 }
338
339 return ret;
340}
341
342int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
343{
344 return _hist_entry__sym_snprintf(&he->ms, he->ip,
345 he->level, bf, size, width);
346}
347
348static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
349{
350 const char *sym = arg;
351
352 if (type != HIST_FILTER__SYMBOL)
353 return -1;
354
355 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
356}
357
358struct sort_entry sort_sym = {
359 .se_header = "Symbol",
360 .se_cmp = sort__sym_cmp,
361 .se_sort = sort__sym_sort,
362 .se_snprintf = hist_entry__sym_snprintf,
363 .se_filter = hist_entry__sym_filter,
364 .se_width_idx = HISTC_SYMBOL,
365};
366
367/* --sort srcline */
368
369char *hist_entry__srcline(struct hist_entry *he)
370{
371 return map__srcline(he->ms.map, he->ip, he->ms.sym);
372}
373
374static int64_t
375sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
376{
377 int64_t ret;
378
379 ret = _sort__addr_cmp(left->ip, right->ip);
380 if (ret)
381 return ret;
382
383 return sort__dso_cmp(left, right);
384}
385
386static int64_t
387sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
388{
389 if (!left->srcline)
390 left->srcline = hist_entry__srcline(left);
391 if (!right->srcline)
392 right->srcline = hist_entry__srcline(right);
393
394 return strcmp(right->srcline, left->srcline);
395}
396
397static int64_t
398sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
399{
400 return sort__srcline_collapse(left, right);
401}
402
403static void
404sort__srcline_init(struct hist_entry *he)
405{
406 if (!he->srcline)
407 he->srcline = hist_entry__srcline(he);
408}
409
410static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
411 size_t size, unsigned int width)
412{
413 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
414}
415
416struct sort_entry sort_srcline = {
417 .se_header = "Source:Line",
418 .se_cmp = sort__srcline_cmp,
419 .se_collapse = sort__srcline_collapse,
420 .se_sort = sort__srcline_sort,
421 .se_init = sort__srcline_init,
422 .se_snprintf = hist_entry__srcline_snprintf,
423 .se_width_idx = HISTC_SRCLINE,
424};
425
426/* --sort srcline_from */
427
428static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
429{
430 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
431}
432
433static int64_t
434sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
435{
436 return left->branch_info->from.addr - right->branch_info->from.addr;
437}
438
439static int64_t
440sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
441{
442 if (!left->branch_info->srcline_from)
443 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
444
445 if (!right->branch_info->srcline_from)
446 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
447
448 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
449}
450
451static int64_t
452sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
453{
454 return sort__srcline_from_collapse(left, right);
455}
456
457static void sort__srcline_from_init(struct hist_entry *he)
458{
459 if (!he->branch_info->srcline_from)
460 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
461}
462
463static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
464 size_t size, unsigned int width)
465{
466 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
467}
468
469struct sort_entry sort_srcline_from = {
470 .se_header = "From Source:Line",
471 .se_cmp = sort__srcline_from_cmp,
472 .se_collapse = sort__srcline_from_collapse,
473 .se_sort = sort__srcline_from_sort,
474 .se_init = sort__srcline_from_init,
475 .se_snprintf = hist_entry__srcline_from_snprintf,
476 .se_width_idx = HISTC_SRCLINE_FROM,
477};
478
479/* --sort srcline_to */
480
481static int64_t
482sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
483{
484 return left->branch_info->to.addr - right->branch_info->to.addr;
485}
486
487static int64_t
488sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
489{
490 if (!left->branch_info->srcline_to)
491 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
492
493 if (!right->branch_info->srcline_to)
494 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
495
496 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
497}
498
499static int64_t
500sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
501{
502 return sort__srcline_to_collapse(left, right);
503}
504
505static void sort__srcline_to_init(struct hist_entry *he)
506{
507 if (!he->branch_info->srcline_to)
508 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
509}
510
511static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
512 size_t size, unsigned int width)
513{
514 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
515}
516
517struct sort_entry sort_srcline_to = {
518 .se_header = "To Source:Line",
519 .se_cmp = sort__srcline_to_cmp,
520 .se_collapse = sort__srcline_to_collapse,
521 .se_sort = sort__srcline_to_sort,
522 .se_init = sort__srcline_to_init,
523 .se_snprintf = hist_entry__srcline_to_snprintf,
524 .se_width_idx = HISTC_SRCLINE_TO,
525};
526
527static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
528 size_t size, unsigned int width)
529{
530
531 struct symbol *sym = he->ms.sym;
532 struct annotation *notes;
533 double ipc = 0.0, coverage = 0.0;
534 char tmp[64];
535
536 if (!sym)
537 return repsep_snprintf(bf, size, "%-*s", width, "-");
538
539 notes = symbol__annotation(sym);
540
541 if (notes->hit_cycles)
542 ipc = notes->hit_insn / ((double)notes->hit_cycles);
543
544 if (notes->total_insn) {
545 coverage = notes->cover_insn * 100.0 /
546 ((double)notes->total_insn);
547 }
548
549 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
550 return repsep_snprintf(bf, size, "%-*s", width, tmp);
551}
552
553struct sort_entry sort_sym_ipc = {
554 .se_header = "IPC [IPC Coverage]",
555 .se_cmp = sort__sym_cmp,
556 .se_snprintf = hist_entry__sym_ipc_snprintf,
557 .se_width_idx = HISTC_SYMBOL_IPC,
558};
559
560static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
561 __maybe_unused,
562 char *bf, size_t size,
563 unsigned int width)
564{
565 char tmp[64];
566
567 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
568 return repsep_snprintf(bf, size, "%-*s", width, tmp);
569}
570
571struct sort_entry sort_sym_ipc_null = {
572 .se_header = "IPC [IPC Coverage]",
573 .se_cmp = sort__sym_cmp,
574 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
575 .se_width_idx = HISTC_SYMBOL_IPC,
576};
577
578/* --sort srcfile */
579
580static char no_srcfile[1];
581
582static char *hist_entry__get_srcfile(struct hist_entry *e)
583{
584 char *sf, *p;
585 struct map *map = e->ms.map;
586
587 if (!map)
588 return no_srcfile;
589
590 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
591 e->ms.sym, false, true, true, e->ip);
592 if (!strcmp(sf, SRCLINE_UNKNOWN))
593 return no_srcfile;
594 p = strchr(sf, ':');
595 if (p && *sf) {
596 *p = 0;
597 return sf;
598 }
599 free(sf);
600 return no_srcfile;
601}
602
603static int64_t
604sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
605{
606 if (!left->srcfile)
607 left->srcfile = hist_entry__get_srcfile(left);
608 if (!right->srcfile)
609 right->srcfile = hist_entry__get_srcfile(right);
610
611 return strcmp(right->srcfile, left->srcfile);
612}
613
614static int64_t
615sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
616{
617 if (!left->srcfile)
618 left->srcfile = hist_entry__get_srcfile(left);
619 if (!right->srcfile)
620 right->srcfile = hist_entry__get_srcfile(right);
621
622 return strcmp(right->srcfile, left->srcfile);
623}
624
625static int64_t
626sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
627{
628 return sort__srcfile_collapse(left, right);
629}
630
631static void sort__srcfile_init(struct hist_entry *he)
632{
633 if (!he->srcfile)
634 he->srcfile = hist_entry__get_srcfile(he);
635}
636
637static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
638 size_t size, unsigned int width)
639{
640 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
641}
642
643struct sort_entry sort_srcfile = {
644 .se_header = "Source File",
645 .se_cmp = sort__srcfile_cmp,
646 .se_collapse = sort__srcfile_collapse,
647 .se_sort = sort__srcfile_sort,
648 .se_init = sort__srcfile_init,
649 .se_snprintf = hist_entry__srcfile_snprintf,
650 .se_width_idx = HISTC_SRCFILE,
651};
652
653/* --sort parent */
654
655static int64_t
656sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
657{
658 struct symbol *sym_l = left->parent;
659 struct symbol *sym_r = right->parent;
660
661 if (!sym_l || !sym_r)
662 return cmp_null(sym_l, sym_r);
663
664 return strcmp(sym_r->name, sym_l->name);
665}
666
667static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
668 size_t size, unsigned int width)
669{
670 return repsep_snprintf(bf, size, "%-*.*s", width, width,
671 he->parent ? he->parent->name : "[other]");
672}
673
674struct sort_entry sort_parent = {
675 .se_header = "Parent symbol",
676 .se_cmp = sort__parent_cmp,
677 .se_snprintf = hist_entry__parent_snprintf,
678 .se_width_idx = HISTC_PARENT,
679};
680
681/* --sort cpu */
682
683static int64_t
684sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
685{
686 return right->cpu - left->cpu;
687}
688
689static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
690 size_t size, unsigned int width)
691{
692 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
693}
694
695struct sort_entry sort_cpu = {
696 .se_header = "CPU",
697 .se_cmp = sort__cpu_cmp,
698 .se_snprintf = hist_entry__cpu_snprintf,
699 .se_width_idx = HISTC_CPU,
700};
701
702/* --sort cgroup_id */
703
704static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
705{
706 return (int64_t)(right_dev - left_dev);
707}
708
709static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
710{
711 return (int64_t)(right_ino - left_ino);
712}
713
714static int64_t
715sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
716{
717 int64_t ret;
718
719 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
720 if (ret != 0)
721 return ret;
722
723 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
724 left->cgroup_id.ino);
725}
726
727static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
728 char *bf, size_t size,
729 unsigned int width __maybe_unused)
730{
731 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
732 he->cgroup_id.ino);
733}
734
735struct sort_entry sort_cgroup_id = {
736 .se_header = "cgroup id (dev/inode)",
737 .se_cmp = sort__cgroup_id_cmp,
738 .se_snprintf = hist_entry__cgroup_id_snprintf,
739 .se_width_idx = HISTC_CGROUP_ID,
740};
741
742/* --sort cgroup */
743
744static int64_t
745sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
746{
747 return right->cgroup - left->cgroup;
748}
749
750static int hist_entry__cgroup_snprintf(struct hist_entry *he,
751 char *bf, size_t size,
752 unsigned int width __maybe_unused)
753{
754 const char *cgrp_name = "N/A";
755
756 if (he->cgroup) {
757 struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
758 he->cgroup);
759 if (cgrp != NULL)
760 cgrp_name = cgrp->name;
761 else
762 cgrp_name = "unknown";
763 }
764
765 return repsep_snprintf(bf, size, "%s", cgrp_name);
766}
767
768struct sort_entry sort_cgroup = {
769 .se_header = "Cgroup",
770 .se_cmp = sort__cgroup_cmp,
771 .se_snprintf = hist_entry__cgroup_snprintf,
772 .se_width_idx = HISTC_CGROUP,
773};
774
775/* --sort socket */
776
777static int64_t
778sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
779{
780 return right->socket - left->socket;
781}
782
783static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
784 size_t size, unsigned int width)
785{
786 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
787}
788
789static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
790{
791 int sk = *(const int *)arg;
792
793 if (type != HIST_FILTER__SOCKET)
794 return -1;
795
796 return sk >= 0 && he->socket != sk;
797}
798
799struct sort_entry sort_socket = {
800 .se_header = "Socket",
801 .se_cmp = sort__socket_cmp,
802 .se_snprintf = hist_entry__socket_snprintf,
803 .se_filter = hist_entry__socket_filter,
804 .se_width_idx = HISTC_SOCKET,
805};
806
807/* --sort time */
808
809static int64_t
810sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
811{
812 return right->time - left->time;
813}
814
815static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
816 size_t size, unsigned int width)
817{
818 char he_time[32];
819
820 if (symbol_conf.nanosecs)
821 timestamp__scnprintf_nsec(he->time, he_time,
822 sizeof(he_time));
823 else
824 timestamp__scnprintf_usec(he->time, he_time,
825 sizeof(he_time));
826
827 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
828}
829
830struct sort_entry sort_time = {
831 .se_header = "Time",
832 .se_cmp = sort__time_cmp,
833 .se_snprintf = hist_entry__time_snprintf,
834 .se_width_idx = HISTC_TIME,
835};
836
837/* --sort trace */
838
839#ifdef HAVE_LIBTRACEEVENT
840static char *get_trace_output(struct hist_entry *he)
841{
842 struct trace_seq seq;
843 struct evsel *evsel;
844 struct tep_record rec = {
845 .data = he->raw_data,
846 .size = he->raw_size,
847 };
848
849 evsel = hists_to_evsel(he->hists);
850
851 trace_seq_init(&seq);
852 if (symbol_conf.raw_trace) {
853 tep_print_fields(&seq, he->raw_data, he->raw_size,
854 evsel->tp_format);
855 } else {
856 tep_print_event(evsel->tp_format->tep,
857 &seq, &rec, "%s", TEP_PRINT_INFO);
858 }
859 /*
860 * Trim the buffer, it starts at 4KB and we're not going to
861 * add anything more to this buffer.
862 */
863 return realloc(seq.buffer, seq.len + 1);
864}
865
866static int64_t
867sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
868{
869 struct evsel *evsel;
870
871 evsel = hists_to_evsel(left->hists);
872 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
873 return 0;
874
875 if (left->trace_output == NULL)
876 left->trace_output = get_trace_output(left);
877 if (right->trace_output == NULL)
878 right->trace_output = get_trace_output(right);
879
880 return strcmp(right->trace_output, left->trace_output);
881}
882
883static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
884 size_t size, unsigned int width)
885{
886 struct evsel *evsel;
887
888 evsel = hists_to_evsel(he->hists);
889 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
890 return scnprintf(bf, size, "%-.*s", width, "N/A");
891
892 if (he->trace_output == NULL)
893 he->trace_output = get_trace_output(he);
894 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
895}
896
897struct sort_entry sort_trace = {
898 .se_header = "Trace output",
899 .se_cmp = sort__trace_cmp,
900 .se_snprintf = hist_entry__trace_snprintf,
901 .se_width_idx = HISTC_TRACE,
902};
903#endif /* HAVE_LIBTRACEEVENT */
904
905/* sort keys for branch stacks */
906
907static int64_t
908sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
909{
910 if (!left->branch_info || !right->branch_info)
911 return cmp_null(left->branch_info, right->branch_info);
912
913 return _sort__dso_cmp(left->branch_info->from.ms.map,
914 right->branch_info->from.ms.map);
915}
916
917static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
918 size_t size, unsigned int width)
919{
920 if (he->branch_info)
921 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
922 bf, size, width);
923 else
924 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
925}
926
927static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
928 const void *arg)
929{
930 const struct dso *dso = arg;
931
932 if (type != HIST_FILTER__DSO)
933 return -1;
934
935 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
936 he->branch_info->from.ms.map->dso != dso);
937}
938
939static int64_t
940sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
941{
942 if (!left->branch_info || !right->branch_info)
943 return cmp_null(left->branch_info, right->branch_info);
944
945 return _sort__dso_cmp(left->branch_info->to.ms.map,
946 right->branch_info->to.ms.map);
947}
948
949static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
950 size_t size, unsigned int width)
951{
952 if (he->branch_info)
953 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
954 bf, size, width);
955 else
956 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
957}
958
959static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
960 const void *arg)
961{
962 const struct dso *dso = arg;
963
964 if (type != HIST_FILTER__DSO)
965 return -1;
966
967 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
968 he->branch_info->to.ms.map->dso != dso);
969}
970
971static int64_t
972sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
973{
974 struct addr_map_symbol *from_l = &left->branch_info->from;
975 struct addr_map_symbol *from_r = &right->branch_info->from;
976
977 if (!left->branch_info || !right->branch_info)
978 return cmp_null(left->branch_info, right->branch_info);
979
980 from_l = &left->branch_info->from;
981 from_r = &right->branch_info->from;
982
983 if (!from_l->ms.sym && !from_r->ms.sym)
984 return _sort__addr_cmp(from_l->addr, from_r->addr);
985
986 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
987}
988
989static int64_t
990sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
991{
992 struct addr_map_symbol *to_l, *to_r;
993
994 if (!left->branch_info || !right->branch_info)
995 return cmp_null(left->branch_info, right->branch_info);
996
997 to_l = &left->branch_info->to;
998 to_r = &right->branch_info->to;
999
1000 if (!to_l->ms.sym && !to_r->ms.sym)
1001 return _sort__addr_cmp(to_l->addr, to_r->addr);
1002
1003 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1004}
1005
1006static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1007 size_t size, unsigned int width)
1008{
1009 if (he->branch_info) {
1010 struct addr_map_symbol *from = &he->branch_info->from;
1011
1012 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1013 from->al_level, bf, size, width);
1014 }
1015
1016 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1017}
1018
1019static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1020 size_t size, unsigned int width)
1021{
1022 if (he->branch_info) {
1023 struct addr_map_symbol *to = &he->branch_info->to;
1024
1025 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1026 to->al_level, bf, size, width);
1027 }
1028
1029 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1030}
1031
1032static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1033 const void *arg)
1034{
1035 const char *sym = arg;
1036
1037 if (type != HIST_FILTER__SYMBOL)
1038 return -1;
1039
1040 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1041 strstr(he->branch_info->from.ms.sym->name, sym));
1042}
1043
1044static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1045 const void *arg)
1046{
1047 const char *sym = arg;
1048
1049 if (type != HIST_FILTER__SYMBOL)
1050 return -1;
1051
1052 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1053 strstr(he->branch_info->to.ms.sym->name, sym));
1054}
1055
1056struct sort_entry sort_dso_from = {
1057 .se_header = "Source Shared Object",
1058 .se_cmp = sort__dso_from_cmp,
1059 .se_snprintf = hist_entry__dso_from_snprintf,
1060 .se_filter = hist_entry__dso_from_filter,
1061 .se_width_idx = HISTC_DSO_FROM,
1062};
1063
1064struct sort_entry sort_dso_to = {
1065 .se_header = "Target Shared Object",
1066 .se_cmp = sort__dso_to_cmp,
1067 .se_snprintf = hist_entry__dso_to_snprintf,
1068 .se_filter = hist_entry__dso_to_filter,
1069 .se_width_idx = HISTC_DSO_TO,
1070};
1071
1072struct sort_entry sort_sym_from = {
1073 .se_header = "Source Symbol",
1074 .se_cmp = sort__sym_from_cmp,
1075 .se_snprintf = hist_entry__sym_from_snprintf,
1076 .se_filter = hist_entry__sym_from_filter,
1077 .se_width_idx = HISTC_SYMBOL_FROM,
1078};
1079
1080struct sort_entry sort_sym_to = {
1081 .se_header = "Target Symbol",
1082 .se_cmp = sort__sym_to_cmp,
1083 .se_snprintf = hist_entry__sym_to_snprintf,
1084 .se_filter = hist_entry__sym_to_filter,
1085 .se_width_idx = HISTC_SYMBOL_TO,
1086};
1087
1088static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1089 u64 ip, char level, char *bf, size_t size,
1090 unsigned int width)
1091{
1092 struct symbol *sym = ms->sym;
1093 struct map *map = ms->map;
1094 size_t ret = 0, offs;
1095
1096 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1097 if (sym && map) {
1098 if (sym->type == STT_OBJECT) {
1099 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1100 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1101 ip - map->unmap_ip(map, sym->start));
1102 } else {
1103 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1104 width - ret,
1105 sym->name);
1106 offs = ip - sym->start;
1107 if (offs)
1108 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1109 }
1110 } else {
1111 size_t len = BITS_PER_LONG / 4;
1112 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1113 len, ip);
1114 }
1115
1116 return ret;
1117}
1118
1119static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1120 size_t size, unsigned int width)
1121{
1122 if (he->branch_info) {
1123 struct addr_map_symbol *from = &he->branch_info->from;
1124
1125 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1126 he->level, bf, size, width);
1127 }
1128
1129 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1130}
1131
1132static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1133 size_t size, unsigned int width)
1134{
1135 if (he->branch_info) {
1136 struct addr_map_symbol *to = &he->branch_info->to;
1137
1138 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1139 he->level, bf, size, width);
1140 }
1141
1142 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1143}
1144
1145static int64_t
1146sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1147{
1148 struct addr_map_symbol *from_l;
1149 struct addr_map_symbol *from_r;
1150 int64_t ret;
1151
1152 if (!left->branch_info || !right->branch_info)
1153 return cmp_null(left->branch_info, right->branch_info);
1154
1155 from_l = &left->branch_info->from;
1156 from_r = &right->branch_info->from;
1157
1158 /*
1159 * comparing symbol address alone is not enough since it's a
1160 * relative address within a dso.
1161 */
1162 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1163 if (ret != 0)
1164 return ret;
1165
1166 return _sort__addr_cmp(from_l->addr, from_r->addr);
1167}
1168
1169static int64_t
1170sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1171{
1172 struct addr_map_symbol *to_l;
1173 struct addr_map_symbol *to_r;
1174 int64_t ret;
1175
1176 if (!left->branch_info || !right->branch_info)
1177 return cmp_null(left->branch_info, right->branch_info);
1178
1179 to_l = &left->branch_info->to;
1180 to_r = &right->branch_info->to;
1181
1182 /*
1183 * comparing symbol address alone is not enough since it's a
1184 * relative address within a dso.
1185 */
1186 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1187 if (ret != 0)
1188 return ret;
1189
1190 return _sort__addr_cmp(to_l->addr, to_r->addr);
1191}
1192
1193struct sort_entry sort_addr_from = {
1194 .se_header = "Source Address",
1195 .se_cmp = sort__addr_from_cmp,
1196 .se_snprintf = hist_entry__addr_from_snprintf,
1197 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1198 .se_width_idx = HISTC_ADDR_FROM,
1199};
1200
1201struct sort_entry sort_addr_to = {
1202 .se_header = "Target Address",
1203 .se_cmp = sort__addr_to_cmp,
1204 .se_snprintf = hist_entry__addr_to_snprintf,
1205 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1206 .se_width_idx = HISTC_ADDR_TO,
1207};
1208
1209
1210static int64_t
1211sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1212{
1213 unsigned char mp, p;
1214
1215 if (!left->branch_info || !right->branch_info)
1216 return cmp_null(left->branch_info, right->branch_info);
1217
1218 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1219 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1220 return mp || p;
1221}
1222
1223static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1224 size_t size, unsigned int width){
1225 static const char *out = "N/A";
1226
1227 if (he->branch_info) {
1228 if (he->branch_info->flags.predicted)
1229 out = "N";
1230 else if (he->branch_info->flags.mispred)
1231 out = "Y";
1232 }
1233
1234 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1235}
1236
1237static int64_t
1238sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1239{
1240 if (!left->branch_info || !right->branch_info)
1241 return cmp_null(left->branch_info, right->branch_info);
1242
1243 return left->branch_info->flags.cycles -
1244 right->branch_info->flags.cycles;
1245}
1246
1247static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1248 size_t size, unsigned int width)
1249{
1250 if (!he->branch_info)
1251 return scnprintf(bf, size, "%-.*s", width, "N/A");
1252 if (he->branch_info->flags.cycles == 0)
1253 return repsep_snprintf(bf, size, "%-*s", width, "-");
1254 return repsep_snprintf(bf, size, "%-*hd", width,
1255 he->branch_info->flags.cycles);
1256}
1257
1258struct sort_entry sort_cycles = {
1259 .se_header = "Basic Block Cycles",
1260 .se_cmp = sort__cycles_cmp,
1261 .se_snprintf = hist_entry__cycles_snprintf,
1262 .se_width_idx = HISTC_CYCLES,
1263};
1264
1265/* --sort daddr_sym */
1266int64_t
1267sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1268{
1269 uint64_t l = 0, r = 0;
1270
1271 if (left->mem_info)
1272 l = left->mem_info->daddr.addr;
1273 if (right->mem_info)
1274 r = right->mem_info->daddr.addr;
1275
1276 return (int64_t)(r - l);
1277}
1278
1279static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1280 size_t size, unsigned int width)
1281{
1282 uint64_t addr = 0;
1283 struct map_symbol *ms = NULL;
1284
1285 if (he->mem_info) {
1286 addr = he->mem_info->daddr.addr;
1287 ms = &he->mem_info->daddr.ms;
1288 }
1289 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1290}
1291
1292int64_t
1293sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1294{
1295 uint64_t l = 0, r = 0;
1296
1297 if (left->mem_info)
1298 l = left->mem_info->iaddr.addr;
1299 if (right->mem_info)
1300 r = right->mem_info->iaddr.addr;
1301
1302 return (int64_t)(r - l);
1303}
1304
1305static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1306 size_t size, unsigned int width)
1307{
1308 uint64_t addr = 0;
1309 struct map_symbol *ms = NULL;
1310
1311 if (he->mem_info) {
1312 addr = he->mem_info->iaddr.addr;
1313 ms = &he->mem_info->iaddr.ms;
1314 }
1315 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1316}
1317
1318static int64_t
1319sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1320{
1321 struct map *map_l = NULL;
1322 struct map *map_r = NULL;
1323
1324 if (left->mem_info)
1325 map_l = left->mem_info->daddr.ms.map;
1326 if (right->mem_info)
1327 map_r = right->mem_info->daddr.ms.map;
1328
1329 return _sort__dso_cmp(map_l, map_r);
1330}
1331
1332static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1333 size_t size, unsigned int width)
1334{
1335 struct map *map = NULL;
1336
1337 if (he->mem_info)
1338 map = he->mem_info->daddr.ms.map;
1339
1340 return _hist_entry__dso_snprintf(map, bf, size, width);
1341}
1342
1343static int64_t
1344sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1345{
1346 union perf_mem_data_src data_src_l;
1347 union perf_mem_data_src data_src_r;
1348
1349 if (left->mem_info)
1350 data_src_l = left->mem_info->data_src;
1351 else
1352 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1353
1354 if (right->mem_info)
1355 data_src_r = right->mem_info->data_src;
1356 else
1357 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1358
1359 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1360}
1361
1362static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1363 size_t size, unsigned int width)
1364{
1365 char out[10];
1366
1367 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1368 return repsep_snprintf(bf, size, "%.*s", width, out);
1369}
1370
1371static int64_t
1372sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1373{
1374 union perf_mem_data_src data_src_l;
1375 union perf_mem_data_src data_src_r;
1376
1377 if (left->mem_info)
1378 data_src_l = left->mem_info->data_src;
1379 else
1380 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1381
1382 if (right->mem_info)
1383 data_src_r = right->mem_info->data_src;
1384 else
1385 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1386
1387 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1388}
1389
1390static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1391 size_t size, unsigned int width)
1392{
1393 char out[64];
1394
1395 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1396 return repsep_snprintf(bf, size, "%-*s", width, out);
1397}
1398
1399static int64_t
1400sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1401{
1402 union perf_mem_data_src data_src_l;
1403 union perf_mem_data_src data_src_r;
1404
1405 if (left->mem_info)
1406 data_src_l = left->mem_info->data_src;
1407 else
1408 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1409
1410 if (right->mem_info)
1411 data_src_r = right->mem_info->data_src;
1412 else
1413 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1414
1415 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1416}
1417
1418static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1419 size_t size, unsigned int width)
1420{
1421 char out[64];
1422
1423 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1424 return repsep_snprintf(bf, size, "%-*s", width, out);
1425}
1426
1427static int64_t
1428sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1429{
1430 union perf_mem_data_src data_src_l;
1431 union perf_mem_data_src data_src_r;
1432
1433 if (left->mem_info)
1434 data_src_l = left->mem_info->data_src;
1435 else
1436 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1437
1438 if (right->mem_info)
1439 data_src_r = right->mem_info->data_src;
1440 else
1441 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1442
1443 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1444}
1445
1446static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1447 size_t size, unsigned int width)
1448{
1449 char out[64];
1450
1451 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1452 return repsep_snprintf(bf, size, "%-*s", width, out);
1453}
1454
1455int64_t
1456sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1457{
1458 u64 l, r;
1459 struct map *l_map, *r_map;
1460 int rc;
1461
1462 if (!left->mem_info) return -1;
1463 if (!right->mem_info) return 1;
1464
1465 /* group event types together */
1466 if (left->cpumode > right->cpumode) return -1;
1467 if (left->cpumode < right->cpumode) return 1;
1468
1469 l_map = left->mem_info->daddr.ms.map;
1470 r_map = right->mem_info->daddr.ms.map;
1471
1472 /* if both are NULL, jump to sort on al_addr instead */
1473 if (!l_map && !r_map)
1474 goto addr;
1475
1476 if (!l_map) return -1;
1477 if (!r_map) return 1;
1478
1479 rc = dso__cmp_id(l_map->dso, r_map->dso);
1480 if (rc)
1481 return rc;
1482 /*
1483 * Addresses with no major/minor numbers are assumed to be
1484 * anonymous in userspace. Sort those on pid then address.
1485 *
1486 * The kernel and non-zero major/minor mapped areas are
1487 * assumed to be unity mapped. Sort those on address.
1488 */
1489
1490 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1491 (!(l_map->flags & MAP_SHARED)) &&
1492 !l_map->dso->id.maj && !l_map->dso->id.min &&
1493 !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1494 /* userspace anonymous */
1495
1496 if (left->thread->pid_ > right->thread->pid_) return -1;
1497 if (left->thread->pid_ < right->thread->pid_) return 1;
1498 }
1499
1500addr:
1501 /* al_addr does all the right addr - start + offset calculations */
1502 l = cl_address(left->mem_info->daddr.al_addr);
1503 r = cl_address(right->mem_info->daddr.al_addr);
1504
1505 if (l > r) return -1;
1506 if (l < r) return 1;
1507
1508 return 0;
1509}
1510
1511static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1512 size_t size, unsigned int width)
1513{
1514
1515 uint64_t addr = 0;
1516 struct map_symbol *ms = NULL;
1517 char level = he->level;
1518
1519 if (he->mem_info) {
1520 struct map *map = he->mem_info->daddr.ms.map;
1521
1522 addr = cl_address(he->mem_info->daddr.al_addr);
1523 ms = &he->mem_info->daddr.ms;
1524
1525 /* print [s] for shared data mmaps */
1526 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1527 map && !(map->prot & PROT_EXEC) &&
1528 (map->flags & MAP_SHARED) &&
1529 (map->dso->id.maj || map->dso->id.min ||
1530 map->dso->id.ino || map->dso->id.ino_generation))
1531 level = 's';
1532 else if (!map)
1533 level = 'X';
1534 }
1535 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1536}
1537
1538struct sort_entry sort_mispredict = {
1539 .se_header = "Branch Mispredicted",
1540 .se_cmp = sort__mispredict_cmp,
1541 .se_snprintf = hist_entry__mispredict_snprintf,
1542 .se_width_idx = HISTC_MISPREDICT,
1543};
1544
1545static int64_t
1546sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1547{
1548 return left->weight - right->weight;
1549}
1550
1551static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1552 size_t size, unsigned int width)
1553{
1554 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1555}
1556
1557struct sort_entry sort_local_weight = {
1558 .se_header = "Local Weight",
1559 .se_cmp = sort__weight_cmp,
1560 .se_snprintf = hist_entry__local_weight_snprintf,
1561 .se_width_idx = HISTC_LOCAL_WEIGHT,
1562};
1563
1564static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1565 size_t size, unsigned int width)
1566{
1567 return repsep_snprintf(bf, size, "%-*llu", width,
1568 he->weight * he->stat.nr_events);
1569}
1570
1571struct sort_entry sort_global_weight = {
1572 .se_header = "Weight",
1573 .se_cmp = sort__weight_cmp,
1574 .se_snprintf = hist_entry__global_weight_snprintf,
1575 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1576};
1577
1578static int64_t
1579sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1580{
1581 return left->ins_lat - right->ins_lat;
1582}
1583
1584static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1585 size_t size, unsigned int width)
1586{
1587 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1588}
1589
1590struct sort_entry sort_local_ins_lat = {
1591 .se_header = "Local INSTR Latency",
1592 .se_cmp = sort__ins_lat_cmp,
1593 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1594 .se_width_idx = HISTC_LOCAL_INS_LAT,
1595};
1596
1597static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1598 size_t size, unsigned int width)
1599{
1600 return repsep_snprintf(bf, size, "%-*u", width,
1601 he->ins_lat * he->stat.nr_events);
1602}
1603
1604struct sort_entry sort_global_ins_lat = {
1605 .se_header = "INSTR Latency",
1606 .se_cmp = sort__ins_lat_cmp,
1607 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1608 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1609};
1610
1611static int64_t
1612sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1613{
1614 return left->p_stage_cyc - right->p_stage_cyc;
1615}
1616
1617static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1618 size_t size, unsigned int width)
1619{
1620 return repsep_snprintf(bf, size, "%-*u", width,
1621 he->p_stage_cyc * he->stat.nr_events);
1622}
1623
1624
1625static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1626 size_t size, unsigned int width)
1627{
1628 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1629}
1630
1631struct sort_entry sort_local_p_stage_cyc = {
1632 .se_header = "Local Pipeline Stage Cycle",
1633 .se_cmp = sort__p_stage_cyc_cmp,
1634 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1635 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1636};
1637
1638struct sort_entry sort_global_p_stage_cyc = {
1639 .se_header = "Pipeline Stage Cycle",
1640 .se_cmp = sort__p_stage_cyc_cmp,
1641 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1642 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1643};
1644
1645struct sort_entry sort_mem_daddr_sym = {
1646 .se_header = "Data Symbol",
1647 .se_cmp = sort__daddr_cmp,
1648 .se_snprintf = hist_entry__daddr_snprintf,
1649 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1650};
1651
1652struct sort_entry sort_mem_iaddr_sym = {
1653 .se_header = "Code Symbol",
1654 .se_cmp = sort__iaddr_cmp,
1655 .se_snprintf = hist_entry__iaddr_snprintf,
1656 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1657};
1658
1659struct sort_entry sort_mem_daddr_dso = {
1660 .se_header = "Data Object",
1661 .se_cmp = sort__dso_daddr_cmp,
1662 .se_snprintf = hist_entry__dso_daddr_snprintf,
1663 .se_width_idx = HISTC_MEM_DADDR_DSO,
1664};
1665
1666struct sort_entry sort_mem_locked = {
1667 .se_header = "Locked",
1668 .se_cmp = sort__locked_cmp,
1669 .se_snprintf = hist_entry__locked_snprintf,
1670 .se_width_idx = HISTC_MEM_LOCKED,
1671};
1672
1673struct sort_entry sort_mem_tlb = {
1674 .se_header = "TLB access",
1675 .se_cmp = sort__tlb_cmp,
1676 .se_snprintf = hist_entry__tlb_snprintf,
1677 .se_width_idx = HISTC_MEM_TLB,
1678};
1679
1680struct sort_entry sort_mem_lvl = {
1681 .se_header = "Memory access",
1682 .se_cmp = sort__lvl_cmp,
1683 .se_snprintf = hist_entry__lvl_snprintf,
1684 .se_width_idx = HISTC_MEM_LVL,
1685};
1686
1687struct sort_entry sort_mem_snoop = {
1688 .se_header = "Snoop",
1689 .se_cmp = sort__snoop_cmp,
1690 .se_snprintf = hist_entry__snoop_snprintf,
1691 .se_width_idx = HISTC_MEM_SNOOP,
1692};
1693
1694struct sort_entry sort_mem_dcacheline = {
1695 .se_header = "Data Cacheline",
1696 .se_cmp = sort__dcacheline_cmp,
1697 .se_snprintf = hist_entry__dcacheline_snprintf,
1698 .se_width_idx = HISTC_MEM_DCACHELINE,
1699};
1700
1701static int64_t
1702sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1703{
1704 union perf_mem_data_src data_src_l;
1705 union perf_mem_data_src data_src_r;
1706
1707 if (left->mem_info)
1708 data_src_l = left->mem_info->data_src;
1709 else
1710 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1711
1712 if (right->mem_info)
1713 data_src_r = right->mem_info->data_src;
1714 else
1715 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1716
1717 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1718}
1719
1720static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1721 size_t size, unsigned int width)
1722{
1723 char out[16];
1724
1725 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1726 return repsep_snprintf(bf, size, "%.*s", width, out);
1727}
1728
1729struct sort_entry sort_mem_blocked = {
1730 .se_header = "Blocked",
1731 .se_cmp = sort__blocked_cmp,
1732 .se_snprintf = hist_entry__blocked_snprintf,
1733 .se_width_idx = HISTC_MEM_BLOCKED,
1734};
1735
1736static int64_t
1737sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1738{
1739 uint64_t l = 0, r = 0;
1740
1741 if (left->mem_info)
1742 l = left->mem_info->daddr.phys_addr;
1743 if (right->mem_info)
1744 r = right->mem_info->daddr.phys_addr;
1745
1746 return (int64_t)(r - l);
1747}
1748
1749static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1750 size_t size, unsigned int width)
1751{
1752 uint64_t addr = 0;
1753 size_t ret = 0;
1754 size_t len = BITS_PER_LONG / 4;
1755
1756 addr = he->mem_info->daddr.phys_addr;
1757
1758 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1759
1760 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1761
1762 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1763
1764 if (ret > width)
1765 bf[width] = '\0';
1766
1767 return width;
1768}
1769
1770struct sort_entry sort_mem_phys_daddr = {
1771 .se_header = "Data Physical Address",
1772 .se_cmp = sort__phys_daddr_cmp,
1773 .se_snprintf = hist_entry__phys_daddr_snprintf,
1774 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1775};
1776
1777static int64_t
1778sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1779{
1780 uint64_t l = 0, r = 0;
1781
1782 if (left->mem_info)
1783 l = left->mem_info->daddr.data_page_size;
1784 if (right->mem_info)
1785 r = right->mem_info->daddr.data_page_size;
1786
1787 return (int64_t)(r - l);
1788}
1789
1790static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1791 size_t size, unsigned int width)
1792{
1793 char str[PAGE_SIZE_NAME_LEN];
1794
1795 return repsep_snprintf(bf, size, "%-*s", width,
1796 get_page_size_name(he->mem_info->daddr.data_page_size, str));
1797}
1798
1799struct sort_entry sort_mem_data_page_size = {
1800 .se_header = "Data Page Size",
1801 .se_cmp = sort__data_page_size_cmp,
1802 .se_snprintf = hist_entry__data_page_size_snprintf,
1803 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
1804};
1805
1806static int64_t
1807sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1808{
1809 uint64_t l = left->code_page_size;
1810 uint64_t r = right->code_page_size;
1811
1812 return (int64_t)(r - l);
1813}
1814
1815static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1816 size_t size, unsigned int width)
1817{
1818 char str[PAGE_SIZE_NAME_LEN];
1819
1820 return repsep_snprintf(bf, size, "%-*s", width,
1821 get_page_size_name(he->code_page_size, str));
1822}
1823
1824struct sort_entry sort_code_page_size = {
1825 .se_header = "Code Page Size",
1826 .se_cmp = sort__code_page_size_cmp,
1827 .se_snprintf = hist_entry__code_page_size_snprintf,
1828 .se_width_idx = HISTC_CODE_PAGE_SIZE,
1829};
1830
1831static int64_t
1832sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1833{
1834 if (!left->branch_info || !right->branch_info)
1835 return cmp_null(left->branch_info, right->branch_info);
1836
1837 return left->branch_info->flags.abort !=
1838 right->branch_info->flags.abort;
1839}
1840
1841static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1842 size_t size, unsigned int width)
1843{
1844 static const char *out = "N/A";
1845
1846 if (he->branch_info) {
1847 if (he->branch_info->flags.abort)
1848 out = "A";
1849 else
1850 out = ".";
1851 }
1852
1853 return repsep_snprintf(bf, size, "%-*s", width, out);
1854}
1855
1856struct sort_entry sort_abort = {
1857 .se_header = "Transaction abort",
1858 .se_cmp = sort__abort_cmp,
1859 .se_snprintf = hist_entry__abort_snprintf,
1860 .se_width_idx = HISTC_ABORT,
1861};
1862
1863static int64_t
1864sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1865{
1866 if (!left->branch_info || !right->branch_info)
1867 return cmp_null(left->branch_info, right->branch_info);
1868
1869 return left->branch_info->flags.in_tx !=
1870 right->branch_info->flags.in_tx;
1871}
1872
1873static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1874 size_t size, unsigned int width)
1875{
1876 static const char *out = "N/A";
1877
1878 if (he->branch_info) {
1879 if (he->branch_info->flags.in_tx)
1880 out = "T";
1881 else
1882 out = ".";
1883 }
1884
1885 return repsep_snprintf(bf, size, "%-*s", width, out);
1886}
1887
1888struct sort_entry sort_in_tx = {
1889 .se_header = "Branch in transaction",
1890 .se_cmp = sort__in_tx_cmp,
1891 .se_snprintf = hist_entry__in_tx_snprintf,
1892 .se_width_idx = HISTC_IN_TX,
1893};
1894
1895static int64_t
1896sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1897{
1898 return left->transaction - right->transaction;
1899}
1900
1901static inline char *add_str(char *p, const char *str)
1902{
1903 strcpy(p, str);
1904 return p + strlen(str);
1905}
1906
1907static struct txbit {
1908 unsigned flag;
1909 const char *name;
1910 int skip_for_len;
1911} txbits[] = {
1912 { PERF_TXN_ELISION, "EL ", 0 },
1913 { PERF_TXN_TRANSACTION, "TX ", 1 },
1914 { PERF_TXN_SYNC, "SYNC ", 1 },
1915 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1916 { PERF_TXN_RETRY, "RETRY ", 0 },
1917 { PERF_TXN_CONFLICT, "CON ", 0 },
1918 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1919 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1920 { 0, NULL, 0 }
1921};
1922
1923int hist_entry__transaction_len(void)
1924{
1925 int i;
1926 int len = 0;
1927
1928 for (i = 0; txbits[i].name; i++) {
1929 if (!txbits[i].skip_for_len)
1930 len += strlen(txbits[i].name);
1931 }
1932 len += 4; /* :XX<space> */
1933 return len;
1934}
1935
1936static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1937 size_t size, unsigned int width)
1938{
1939 u64 t = he->transaction;
1940 char buf[128];
1941 char *p = buf;
1942 int i;
1943
1944 buf[0] = 0;
1945 for (i = 0; txbits[i].name; i++)
1946 if (txbits[i].flag & t)
1947 p = add_str(p, txbits[i].name);
1948 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1949 p = add_str(p, "NEITHER ");
1950 if (t & PERF_TXN_ABORT_MASK) {
1951 sprintf(p, ":%" PRIx64,
1952 (t & PERF_TXN_ABORT_MASK) >>
1953 PERF_TXN_ABORT_SHIFT);
1954 p += strlen(p);
1955 }
1956
1957 return repsep_snprintf(bf, size, "%-*s", width, buf);
1958}
1959
1960struct sort_entry sort_transaction = {
1961 .se_header = "Transaction ",
1962 .se_cmp = sort__transaction_cmp,
1963 .se_snprintf = hist_entry__transaction_snprintf,
1964 .se_width_idx = HISTC_TRANSACTION,
1965};
1966
1967/* --sort symbol_size */
1968
1969static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1970{
1971 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1972 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1973
1974 return size_l < size_r ? -1 :
1975 size_l == size_r ? 0 : 1;
1976}
1977
1978static int64_t
1979sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1980{
1981 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1982}
1983
1984static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1985 size_t bf_size, unsigned int width)
1986{
1987 if (sym)
1988 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1989
1990 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1991}
1992
1993static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1994 size_t size, unsigned int width)
1995{
1996 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1997}
1998
1999struct sort_entry sort_sym_size = {
2000 .se_header = "Symbol size",
2001 .se_cmp = sort__sym_size_cmp,
2002 .se_snprintf = hist_entry__sym_size_snprintf,
2003 .se_width_idx = HISTC_SYM_SIZE,
2004};
2005
2006/* --sort dso_size */
2007
2008static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2009{
2010 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2011 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2012
2013 return size_l < size_r ? -1 :
2014 size_l == size_r ? 0 : 1;
2015}
2016
2017static int64_t
2018sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2019{
2020 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2021}
2022
2023static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2024 size_t bf_size, unsigned int width)
2025{
2026 if (map && map->dso)
2027 return repsep_snprintf(bf, bf_size, "%*d", width,
2028 map__size(map));
2029
2030 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2031}
2032
2033static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2034 size_t size, unsigned int width)
2035{
2036 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2037}
2038
2039struct sort_entry sort_dso_size = {
2040 .se_header = "DSO size",
2041 .se_cmp = sort__dso_size_cmp,
2042 .se_snprintf = hist_entry__dso_size_snprintf,
2043 .se_width_idx = HISTC_DSO_SIZE,
2044};
2045
/* --sort addr */
2047
2048static int64_t
2049sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2050{
2051 u64 left_ip = left->ip;
2052 u64 right_ip = right->ip;
2053 struct map *left_map = left->ms.map;
2054 struct map *right_map = right->ms.map;
2055
2056 if (left_map)
2057 left_ip = left_map->unmap_ip(left_map, left_ip);
2058 if (right_map)
2059 right_ip = right_map->unmap_ip(right_map, right_ip);
2060
2061 return _sort__addr_cmp(left_ip, right_ip);
2062}
2063
2064static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2065 size_t size, unsigned int width)
2066{
2067 u64 ip = he->ip;
2068 struct map *map = he->ms.map;
2069
2070 if (map)
2071 ip = map->unmap_ip(map, ip);
2072
2073 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2074}
2075
2076struct sort_entry sort_addr = {
2077 .se_header = "Address",
2078 .se_cmp = sort__addr_cmp,
2079 .se_snprintf = hist_entry__addr_snprintf,
2080 .se_width_idx = HISTC_ADDR,
2081};
2082
2084struct sort_dimension {
2085 const char *name;
2086 struct sort_entry *entry;
2087 int taken;
2088};
2089
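/*
 * Weak default implementations of the arch hooks: an architecture can
 * override arch_support_sort_key() to enable the keys listed in
 * arch_specific_sort_keys[] and arch_perf_header_entry() to provide
 * arch-specific column headers for the keys in dynamic_headers[].
 */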
2090int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2091{
2092 return 0;
2093}
2094
2095const char * __weak arch_perf_header_entry(const char *se_header)
2096{
2097 return se_header;
2098}
2099
2100static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2101{
2102 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2103}
2104
2105#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2106
2107static struct sort_dimension common_sort_dimensions[] = {
2108 DIM(SORT_PID, "pid", sort_thread),
2109 DIM(SORT_COMM, "comm", sort_comm),
2110 DIM(SORT_DSO, "dso", sort_dso),
2111 DIM(SORT_SYM, "symbol", sort_sym),
2112 DIM(SORT_PARENT, "parent", sort_parent),
2113 DIM(SORT_CPU, "cpu", sort_cpu),
2114 DIM(SORT_SOCKET, "socket", sort_socket),
2115 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2116 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2117 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2118 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2119 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2120#ifdef HAVE_LIBTRACEEVENT
2121 DIM(SORT_TRACE, "trace", sort_trace),
2122#endif
2123 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2124 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2125 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2126 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2127 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2128 DIM(SORT_TIME, "time", sort_time),
2129 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2130 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2131 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2132 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2133 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2134 DIM(SORT_ADDR, "addr", sort_addr),
2135};
2136
2137#undef DIM
2138
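/*
 * The branch stack and memory sort keys occupy separate ranges in
 * enum sort_type, so the designated initializers below subtract the
 * start of the range (__SORT_BRANCH_STACK here, __SORT_MEMORY_MODE
 * further down) to index these arrays from zero.
 */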
2139#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2140
2141static struct sort_dimension bstack_sort_dimensions[] = {
2142 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2143 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2144 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2145 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2146 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2147 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2148 DIM(SORT_ABORT, "abort", sort_abort),
2149 DIM(SORT_CYCLES, "cycles", sort_cycles),
2150 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2151 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2152 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2153 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2154 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2155};
2156
2157#undef DIM
2158
2159#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2160
2161static struct sort_dimension memory_sort_dimensions[] = {
2162 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2163 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2164 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2165 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2166 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2167 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2168 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2169 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2170 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2171 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2172 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2173};
2174
2175#undef DIM
2176
2177struct hpp_dimension {
2178 const char *name;
2179 struct perf_hpp_fmt *fmt;
2180 int taken;
2181};
2182
2183#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2184
2185static struct hpp_dimension hpp_sort_dimensions[] = {
2186 DIM(PERF_HPP__OVERHEAD, "overhead"),
2187 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2188 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2189 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2190 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2191 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2192 DIM(PERF_HPP__SAMPLES, "sample"),
2193 DIM(PERF_HPP__PERIOD, "period"),
2194};
2195
2196#undef DIM
2197
2198struct hpp_sort_entry {
2199 struct perf_hpp_fmt hpp;
2200 struct sort_entry *se;
2201};
2202
2203void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2204{
2205 struct hpp_sort_entry *hse;
2206
2207 if (!perf_hpp__is_sort_entry(fmt))
2208 return;
2209
2210 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2211 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2212}
2213
2214static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2215 struct hists *hists, int line __maybe_unused,
2216 int *span __maybe_unused)
2217{
2218 struct hpp_sort_entry *hse;
2219 size_t len = fmt->user_len;
2220
2221 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2222
2223 if (!len)
2224 len = hists__col_len(hists, hse->se->se_width_idx);
2225
2226 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2227}
2228
2229static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2230 struct perf_hpp *hpp __maybe_unused,
2231 struct hists *hists)
2232{
2233 struct hpp_sort_entry *hse;
2234 size_t len = fmt->user_len;
2235
2236 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2237
2238 if (!len)
2239 len = hists__col_len(hists, hse->se->se_width_idx);
2240
2241 return len;
2242}
2243
2244static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2245 struct hist_entry *he)
2246{
2247 struct hpp_sort_entry *hse;
2248 size_t len = fmt->user_len;
2249
2250 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2251
2252 if (!len)
2253 len = hists__col_len(he->hists, hse->se->se_width_idx);
2254
2255 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2256}
2257
2258static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2259 struct hist_entry *a, struct hist_entry *b)
2260{
2261 struct hpp_sort_entry *hse;
2262
2263 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2264 return hse->se->se_cmp(a, b);
2265}
2266
2267static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2268 struct hist_entry *a, struct hist_entry *b)
2269{
2270 struct hpp_sort_entry *hse;
2271 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2272
2273 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2274 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2275 return collapse_fn(a, b);
2276}
2277
2278static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2279 struct hist_entry *a, struct hist_entry *b)
2280{
2281 struct hpp_sort_entry *hse;
2282 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2283
2284 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2285 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2286 return sort_fn(a, b);
2287}
2288
2289bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2290{
2291 return format->header == __sort__hpp_header;
2292}
2293
2294#define MK_SORT_ENTRY_CHK(key) \
2295bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2296{ \
2297 struct hpp_sort_entry *hse; \
2298 \
2299 if (!perf_hpp__is_sort_entry(fmt)) \
2300 return false; \
2301 \
2302 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2303 return hse->se == &sort_ ## key ; \
2304}
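/*
 * For example, MK_SORT_ENTRY_CHK(dso) expands to perf_hpp__is_dso_entry(),
 * which reports whether a given format is the hpp wrapper around &sort_dso.
 */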
2305
2306#ifdef HAVE_LIBTRACEEVENT
2307MK_SORT_ENTRY_CHK(trace)
2308#else
2309bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2310{
2311 return false;
2312}
2313#endif
2314MK_SORT_ENTRY_CHK(srcline)
2315MK_SORT_ENTRY_CHK(srcfile)
2316MK_SORT_ENTRY_CHK(thread)
2317MK_SORT_ENTRY_CHK(comm)
2318MK_SORT_ENTRY_CHK(dso)
2319MK_SORT_ENTRY_CHK(sym)
2320
2322static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2323{
2324 struct hpp_sort_entry *hse_a;
2325 struct hpp_sort_entry *hse_b;
2326
2327 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2328 return false;
2329
2330 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2331 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2332
2333 return hse_a->se == hse_b->se;
2334}
2335
2336static void hse_free(struct perf_hpp_fmt *fmt)
2337{
2338 struct hpp_sort_entry *hse;
2339
2340 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2341 free(hse);
2342}
2343
2344static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2345{
2346 struct hpp_sort_entry *hse;
2347
2348 if (!perf_hpp__is_sort_entry(fmt))
2349 return;
2350
2351 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2352
2353 if (hse->se->se_init)
2354 hse->se->se_init(he);
2355}
2356
2357static struct hpp_sort_entry *
2358__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2359{
2360 struct hpp_sort_entry *hse;
2361
2362 hse = malloc(sizeof(*hse));
2363 if (hse == NULL) {
2364 pr_err("Memory allocation failed\n");
2365 return NULL;
2366 }
2367
2368 hse->se = sd->entry;
2369 hse->hpp.name = sd->entry->se_header;
2370 hse->hpp.header = __sort__hpp_header;
2371 hse->hpp.width = __sort__hpp_width;
2372 hse->hpp.entry = __sort__hpp_entry;
2373 hse->hpp.color = NULL;
2374
2375 hse->hpp.cmp = __sort__hpp_cmp;
2376 hse->hpp.collapse = __sort__hpp_collapse;
2377 hse->hpp.sort = __sort__hpp_sort;
2378 hse->hpp.equal = __sort__hpp_equal;
2379 hse->hpp.free = hse_free;
2380 hse->hpp.init = hse_init;
2381
2382 INIT_LIST_HEAD(&hse->hpp.list);
2383 INIT_LIST_HEAD(&hse->hpp.sort_list);
2384 hse->hpp.elide = false;
2385 hse->hpp.len = 0;
2386 hse->hpp.user_len = 0;
2387 hse->hpp.level = level;
2388
2389 return hse;
2390}
2391
2392static void hpp_free(struct perf_hpp_fmt *fmt)
2393{
2394 free(fmt);
2395}
2396
2397static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2398 int level)
2399{
2400 struct perf_hpp_fmt *fmt;
2401
2402 fmt = memdup(hd->fmt, sizeof(*fmt));
2403 if (fmt) {
2404 INIT_LIST_HEAD(&fmt->list);
2405 INIT_LIST_HEAD(&fmt->sort_list);
2406 fmt->free = hpp_free;
2407 fmt->level = level;
2408 }
2409
2410 return fmt;
2411}
2412
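/*
 * Return convention: -1 means no sort key in the hpp list provides a
 * filter of the requested type, 0 means the filters were consulted and
 * the entry matched (keep it), and a positive value means at least one
 * filter rejected the entry.
 */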
2413int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2414{
2415 struct perf_hpp_fmt *fmt;
2416 struct hpp_sort_entry *hse;
2417 int ret = -1;
2418 int r;
2419
2420 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2421 if (!perf_hpp__is_sort_entry(fmt))
2422 continue;
2423
2424 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2425 if (hse->se->se_filter == NULL)
2426 continue;
2427
		/*
		 * The hist entry is filtered if any of the sort keys in the
		 * hpp list applies a filter, but filter types that don't
		 * match the requested type are skipped.
		 */
2432 r = hse->se->se_filter(he, type, arg);
2433 if (r >= 0) {
2434 if (ret < 0)
2435 ret = 0;
2436 ret |= r;
2437 }
2438 }
2439
2440 return ret;
2441}
2442
2443static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2444 struct perf_hpp_list *list,
2445 int level)
2446{
2447 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2448
2449 if (hse == NULL)
2450 return -1;
2451
2452 perf_hpp_list__register_sort_field(list, &hse->hpp);
2453 return 0;
2454}
2455
2456static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2457 struct perf_hpp_list *list)
2458{
2459 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2460
2461 if (hse == NULL)
2462 return -1;
2463
2464 perf_hpp_list__column_register(list, &hse->hpp);
2465 return 0;
2466}
2467
2468#ifndef HAVE_LIBTRACEEVENT
2469bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2470{
2471 return false;
2472}
2473bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2474 struct hists *hists __maybe_unused)
2475{
2476 return false;
2477}
2478#else
2479struct hpp_dynamic_entry {
2480 struct perf_hpp_fmt hpp;
2481 struct evsel *evsel;
2482 struct tep_format_field *field;
2483 unsigned dynamic_len;
2484 bool raw_trace;
2485};
2486
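/*
 * Column width for a dynamic field: at least the field name, and for
 * non-string fields enough room to print the raw value in hex
 * (e.g. 2 * 8 + 2 = 18 characters for an 8-byte field, presumably
 * accounting for a "0x" prefix).
 */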
2487static int hde_width(struct hpp_dynamic_entry *hde)
2488{
2489 if (!hde->hpp.len) {
2490 int len = hde->dynamic_len;
2491 int namelen = strlen(hde->field->name);
2492 int fieldlen = hde->field->size;
2493
2494 if (namelen > len)
2495 len = namelen;
2496
2497 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length needed to print the value as hex */
2499 fieldlen = hde->field->size * 2 + 2;
2500 }
2501 if (fieldlen > len)
2502 len = fieldlen;
2503
2504 hde->hpp.len = len;
2505 }
2506 return hde->hpp.len;
2507}
2508
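/*
 * The pretty-printed trace output is a space-separated list of
 * "name=value" tokens, e.g. (illustrative)
 * "prev_comm=swapper/0 prev_pid=0 prev_prio=120 ...", so scanning for
 * the field name and measuring up to the next space gives the printed
 * width of this field's value.
 */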
2509static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2510 struct hist_entry *he)
2511{
2512 char *str, *pos;
2513 struct tep_format_field *field = hde->field;
2514 size_t namelen;
2515 bool last = false;
2516
2517 if (hde->raw_trace)
2518 return;
2519
2520 /* parse pretty print result and update max length */
2521 if (!he->trace_output)
2522 he->trace_output = get_trace_output(he);
2523
2524 namelen = strlen(field->name);
2525 str = he->trace_output;
2526
2527 while (str) {
2528 pos = strchr(str, ' ');
2529 if (pos == NULL) {
2530 last = true;
2531 pos = str + strlen(str);
2532 }
2533
2534 if (!strncmp(str, field->name, namelen)) {
2535 size_t len;
2536
2537 str += namelen + 1;
2538 len = pos - str;
2539
2540 if (len > hde->dynamic_len)
2541 hde->dynamic_len = len;
2542 break;
2543 }
2544
2545 if (last)
2546 str = NULL;
2547 else
2548 str = pos + 1;
2549 }
2550}
2551
2552static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2553 struct hists *hists __maybe_unused,
2554 int line __maybe_unused,
2555 int *span __maybe_unused)
2556{
2557 struct hpp_dynamic_entry *hde;
2558 size_t len = fmt->user_len;
2559
2560 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2561
2562 if (!len)
2563 len = hde_width(hde);
2564
2565 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2566}
2567
2568static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2569 struct perf_hpp *hpp __maybe_unused,
2570 struct hists *hists __maybe_unused)
2571{
2572 struct hpp_dynamic_entry *hde;
2573 size_t len = fmt->user_len;
2574
2575 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2576
2577 if (!len)
2578 len = hde_width(hde);
2579
2580 return len;
2581}
2582
2583bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2584{
2585 struct hpp_dynamic_entry *hde;
2586
2587 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2588
2589 return hists_to_evsel(hists) == hde->evsel;
2590}
2591
2592static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2593 struct hist_entry *he)
2594{
2595 struct hpp_dynamic_entry *hde;
2596 size_t len = fmt->user_len;
2597 char *str, *pos;
2598 struct tep_format_field *field;
2599 size_t namelen;
2600 bool last = false;
2601 int ret;
2602
2603 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2604
2605 if (!len)
2606 len = hde_width(hde);
2607
2608 if (hde->raw_trace)
2609 goto raw_field;
2610
2611 if (!he->trace_output)
2612 he->trace_output = get_trace_output(he);
2613
2614 field = hde->field;
2615 namelen = strlen(field->name);
2616 str = he->trace_output;
2617
2618 while (str) {
2619 pos = strchr(str, ' ');
2620 if (pos == NULL) {
2621 last = true;
2622 pos = str + strlen(str);
2623 }
2624
2625 if (!strncmp(str, field->name, namelen)) {
2626 str += namelen + 1;
2627 str = strndup(str, pos - str);
2628
2629 if (str == NULL)
2630 return scnprintf(hpp->buf, hpp->size,
2631 "%*.*s", len, len, "ERROR");
2632 break;
2633 }
2634
2635 if (last)
2636 str = NULL;
2637 else
2638 str = pos + 1;
2639 }
2640
2641 if (str == NULL) {
2642 struct trace_seq seq;
2643raw_field:
2644 trace_seq_init(&seq);
2645 tep_print_field(&seq, he->raw_data, hde->field);
2646 str = seq.buffer;
2647 }
2648
2649 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2650 free(str);
2651 return ret;
2652}
2653
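/*
 * Compare two entries by the raw bytes of the tracepoint field. For
 * dynamic (variable length) fields the 32-bit value stored in the raw
 * data encodes the payload location: the low 16 bits are the offset and
 * the high 16 bits the size, which is what the decoding below relies on.
 */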
2654static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2655 struct hist_entry *a, struct hist_entry *b)
2656{
2657 struct hpp_dynamic_entry *hde;
2658 struct tep_format_field *field;
2659 unsigned offset, size;
2660
2661 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2662
2663 field = hde->field;
2664 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2665 unsigned long long dyn;
2666
2667 tep_read_number_field(field, a->raw_data, &dyn);
2668 offset = dyn & 0xffff;
2669 size = (dyn >> 16) & 0xffff;
2670#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
2671 if (field->flags & TEP_FIELD_IS_RELATIVE)
2672 offset += field->offset + field->size;
2673#endif
2674 /* record max width for output */
2675 if (size > hde->dynamic_len)
2676 hde->dynamic_len = size;
2677 } else {
2678 offset = field->offset;
2679 size = field->size;
2680 }
2681
2682 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2683}
2684
2685bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2686{
2687 return fmt->cmp == __sort__hde_cmp;
2688}
2689
2690static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2691{
2692 struct hpp_dynamic_entry *hde_a;
2693 struct hpp_dynamic_entry *hde_b;
2694
2695 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2696 return false;
2697
2698 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2699 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2700
2701 return hde_a->field == hde_b->field;
2702}
2703
2704static void hde_free(struct perf_hpp_fmt *fmt)
2705{
2706 struct hpp_dynamic_entry *hde;
2707
2708 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2709 free(hde);
2710}
2711
2712static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2713{
2714 struct hpp_dynamic_entry *hde;
2715
2716 if (!perf_hpp__is_dynamic_entry(fmt))
2717 return;
2718
2719 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2720 update_dynamic_len(hde, he);
2721}
2722
2723static struct hpp_dynamic_entry *
2724__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2725 int level)
2726{
2727 struct hpp_dynamic_entry *hde;
2728
2729 hde = malloc(sizeof(*hde));
2730 if (hde == NULL) {
2731 pr_debug("Memory allocation failed\n");
2732 return NULL;
2733 }
2734
2735 hde->evsel = evsel;
2736 hde->field = field;
2737 hde->dynamic_len = 0;
2738
2739 hde->hpp.name = field->name;
2740 hde->hpp.header = __sort__hde_header;
2741 hde->hpp.width = __sort__hde_width;
2742 hde->hpp.entry = __sort__hde_entry;
2743 hde->hpp.color = NULL;
2744
2745 hde->hpp.init = __sort__hde_init;
2746 hde->hpp.cmp = __sort__hde_cmp;
2747 hde->hpp.collapse = __sort__hde_cmp;
2748 hde->hpp.sort = __sort__hde_cmp;
2749 hde->hpp.equal = __sort__hde_equal;
2750 hde->hpp.free = hde_free;
2751
2752 INIT_LIST_HEAD(&hde->hpp.list);
2753 INIT_LIST_HEAD(&hde->hpp.sort_list);
2754 hde->hpp.elide = false;
2755 hde->hpp.len = 0;
2756 hde->hpp.user_len = 0;
2757 hde->hpp.level = level;
2758
2759 return hde;
2760}
2761#endif /* HAVE_LIBTRACEEVENT */
2762
2763struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2764{
2765 struct perf_hpp_fmt *new_fmt = NULL;
2766
2767 if (perf_hpp__is_sort_entry(fmt)) {
2768 struct hpp_sort_entry *hse, *new_hse;
2769
2770 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2771 new_hse = memdup(hse, sizeof(*hse));
2772 if (new_hse)
2773 new_fmt = &new_hse->hpp;
2774#ifdef HAVE_LIBTRACEEVENT
2775 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2776 struct hpp_dynamic_entry *hde, *new_hde;
2777
2778 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2779 new_hde = memdup(hde, sizeof(*hde));
2780 if (new_hde)
2781 new_fmt = &new_hde->hpp;
2782#endif
2783 } else {
2784 new_fmt = memdup(fmt, sizeof(*fmt));
2785 }
2786
2787 INIT_LIST_HEAD(&new_fmt->list);
2788 INIT_LIST_HEAD(&new_fmt->sort_list);
2789
2790 return new_fmt;
2791}
2792
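/*
 * Split a --sort token of the form [<event>.]<field>[/<option>] in
 * place. For example (illustrative), "sched:sched_switch.prev_comm/raw"
 * yields event "sched:sched_switch", field "prev_comm" and option
 * "raw"; a bare "prev_comm" yields a NULL event name.
 */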
2793static int parse_field_name(char *str, char **event, char **field, char **opt)
2794{
2795 char *event_name, *field_name, *opt_name;
2796
2797 event_name = str;
2798 field_name = strchr(str, '.');
2799
2800 if (field_name) {
2801 *field_name++ = '\0';
2802 } else {
2803 event_name = NULL;
2804 field_name = str;
2805 }
2806
2807 opt_name = strchr(field_name, '/');
2808 if (opt_name)
2809 *opt_name++ = '\0';
2810
2811 *event = event_name;
2812 *field = field_name;
2813 *opt = opt_name;
2814
2815 return 0;
2816}
2817
/*
 * Find the matching evsel for a given event name. The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. a full event name (e.g. sched:sched_switch)
 *   3. a partial event name (should not contain ':')
 */
2823static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2824{
2825 struct evsel *evsel = NULL;
2826 struct evsel *pos;
2827 bool full_name;
2828
2829 /* case 1 */
2830 if (event_name[0] == '%') {
2831 int nr = strtol(event_name+1, NULL, 0);
2832
2833 if (nr > evlist->core.nr_entries)
2834 return NULL;
2835
2836 evsel = evlist__first(evlist);
2837 while (--nr > 0)
2838 evsel = evsel__next(evsel);
2839
2840 return evsel;
2841 }
2842
2843 full_name = !!strchr(event_name, ':');
2844 evlist__for_each_entry(evlist, pos) {
2845 /* case 2 */
2846 if (full_name && !strcmp(pos->name, event_name))
2847 return pos;
2848 /* case 3 */
2849 if (!full_name && strstr(pos->name, event_name)) {
2850 if (evsel) {
2851 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2852 event_name, evsel->name, pos->name);
2853 return NULL;
2854 }
2855 evsel = pos;
2856 }
2857 }
2858
2859 return evsel;
2860}
2861
2862#ifdef HAVE_LIBTRACEEVENT
2863static int __dynamic_dimension__add(struct evsel *evsel,
2864 struct tep_format_field *field,
2865 bool raw_trace, int level)
2866{
2867 struct hpp_dynamic_entry *hde;
2868
2869 hde = __alloc_dynamic_entry(evsel, field, level);
2870 if (hde == NULL)
2871 return -ENOMEM;
2872
2873 hde->raw_trace = raw_trace;
2874
2875 perf_hpp__register_sort_field(&hde->hpp);
2876 return 0;
2877}
2878
2879static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2880{
2881 int ret;
2882 struct tep_format_field *field;
2883
2884 field = evsel->tp_format->format.fields;
2885 while (field) {
2886 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2887 if (ret < 0)
2888 return ret;
2889
2890 field = field->next;
2891 }
2892 return 0;
2893}
2894
2895static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2896 int level)
2897{
2898 int ret;
2899 struct evsel *evsel;
2900
2901 evlist__for_each_entry(evlist, evsel) {
2902 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2903 continue;
2904
2905 ret = add_evsel_fields(evsel, raw_trace, level);
2906 if (ret < 0)
2907 return ret;
2908 }
2909 return 0;
2910}
2911
2912static int add_all_matching_fields(struct evlist *evlist,
2913 char *field_name, bool raw_trace, int level)
2914{
2915 int ret = -ESRCH;
2916 struct evsel *evsel;
2917 struct tep_format_field *field;
2918
2919 evlist__for_each_entry(evlist, evsel) {
2920 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2921 continue;
2922
2923 field = tep_find_any_field(evsel->tp_format, field_name);
2924 if (field == NULL)
2925 continue;
2926
2927 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2928 if (ret < 0)
2929 break;
2930 }
2931 return ret;
2932}
2933#endif /* HAVE_LIBTRACEEVENT */
2934
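/*
 * Add sort keys for tracepoint fields. As handled below, the token can
 * be "trace_fields" (every field of every tracepoint event), "<field>"
 * (the field looked up in all tracepoint events), "<event>.<field>"
 * (one field of one event) or "<event>.*" (all fields of one event).
 * A "/raw" suffix disables pretty printing of the field.
 */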
2935static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2936 int level)
2937{
2938 char *str, *event_name, *field_name, *opt_name;
2939 struct evsel *evsel;
2940 bool raw_trace = symbol_conf.raw_trace;
2941 int ret = 0;
2942
2943 if (evlist == NULL)
2944 return -ENOENT;
2945
2946 str = strdup(tok);
2947 if (str == NULL)
2948 return -ENOMEM;
2949
2950 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2951 ret = -EINVAL;
2952 goto out;
2953 }
2954
2955 if (opt_name) {
2956 if (strcmp(opt_name, "raw")) {
2957 pr_debug("unsupported field option %s\n", opt_name);
2958 ret = -EINVAL;
2959 goto out;
2960 }
2961 raw_trace = true;
2962 }
2963
2964#ifdef HAVE_LIBTRACEEVENT
2965 if (!strcmp(field_name, "trace_fields")) {
2966 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2967 goto out;
2968 }
2969
2970 if (event_name == NULL) {
2971 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2972 goto out;
2973 }
2974#else
2975 evlist__for_each_entry(evlist, evsel) {
2976 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2977 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
2978 ret = -ENOTSUP;
2979 }
2980 }
2981
2982 if (ret) {
2983 pr_err("\n");
2984 goto out;
2985 }
2986#endif
2987
2988 evsel = find_evsel(evlist, event_name);
2989 if (evsel == NULL) {
2990 pr_debug("Cannot find event: %s\n", event_name);
2991 ret = -ENOENT;
2992 goto out;
2993 }
2994
2995 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2996 pr_debug("%s is not a tracepoint event\n", event_name);
2997 ret = -EINVAL;
2998 goto out;
2999 }
3000
3001#ifdef HAVE_LIBTRACEEVENT
3002 if (!strcmp(field_name, "*")) {
3003 ret = add_evsel_fields(evsel, raw_trace, level);
3004 } else {
3005 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3006
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;	/* don't leak 'str' */
		}
3012
3013 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3014 }
3015#else
3016 (void)level;
3017 (void)raw_trace;
3018#endif /* HAVE_LIBTRACEEVENT */
3019
3020out:
3021 free(str);
3022 return ret;
3023}
3024
3025static int __sort_dimension__add(struct sort_dimension *sd,
3026 struct perf_hpp_list *list,
3027 int level)
3028{
3029 if (sd->taken)
3030 return 0;
3031
3032 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3033 return -1;
3034
3035 if (sd->entry->se_collapse)
3036 list->need_collapse = 1;
3037
3038 sd->taken = 1;
3039
3040 return 0;
3041}
3042
3043static int __hpp_dimension__add(struct hpp_dimension *hd,
3044 struct perf_hpp_list *list,
3045 int level)
3046{
3047 struct perf_hpp_fmt *fmt;
3048
3049 if (hd->taken)
3050 return 0;
3051
3052 fmt = __hpp_dimension__alloc_hpp(hd, level);
3053 if (!fmt)
3054 return -1;
3055
3056 hd->taken = 1;
3057 perf_hpp_list__register_sort_field(list, fmt);
3058 return 0;
3059}
3060
3061static int __sort_dimension__add_output(struct perf_hpp_list *list,
3062 struct sort_dimension *sd)
3063{
3064 if (sd->taken)
3065 return 0;
3066
3067 if (__sort_dimension__add_hpp_output(sd, list) < 0)
3068 return -1;
3069
3070 sd->taken = 1;
3071 return 0;
3072}
3073
3074static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3075 struct hpp_dimension *hd)
3076{
3077 struct perf_hpp_fmt *fmt;
3078
3079 if (hd->taken)
3080 return 0;
3081
3082 fmt = __hpp_dimension__alloc_hpp(hd, 0);
3083 if (!fmt)
3084 return -1;
3085
3086 hd->taken = 1;
3087 perf_hpp_list__column_register(list, fmt);
3088 return 0;
3089}
3090
3091int hpp_dimension__add_output(unsigned col)
3092{
3093 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3094 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3095}
3096
3097int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3098 struct evlist *evlist,
3099 int level)
3100{
3101 unsigned int i, j;
3102
	/*
	 * Check whether this is an arch-specific sort dimension that is
	 * not applicable to the current architecture. If so, skip the
	 * sort key since we don't want to display it in the output fields.
	 */
3109 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3110 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3111 !arch_support_sort_key(tok)) {
3112 return 0;
3113 }
3114 }
3115
3116 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3117 struct sort_dimension *sd = &common_sort_dimensions[i];
3118
3119 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3120 continue;
3121
3122 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3123 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3124 sort_dimension_add_dynamic_header(sd);
3125 }
3126
3127 if (sd->entry == &sort_parent) {
3128 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3129 if (ret) {
3130 char err[BUFSIZ];
3131
3132 regerror(ret, &parent_regex, err, sizeof(err));
3133 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3134 return -EINVAL;
3135 }
3136 list->parent = 1;
3137 } else if (sd->entry == &sort_sym) {
3138 list->sym = 1;
			/*
			 * perf diff displays the performance difference between
			 * two or more perf.data files. Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
3145 if (sort__mode == SORT_MODE__DIFF)
3146 sd->entry->se_collapse = sort__sym_sort;
3147
3148 } else if (sd->entry == &sort_dso) {
3149 list->dso = 1;
3150 } else if (sd->entry == &sort_socket) {
3151 list->socket = 1;
3152 } else if (sd->entry == &sort_thread) {
3153 list->thread = 1;
3154 } else if (sd->entry == &sort_comm) {
3155 list->comm = 1;
3156 }
3157
3158 return __sort_dimension__add(sd, list, level);
3159 }
3160
3161 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3162 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3163
3164 if (strncasecmp(tok, hd->name, strlen(tok)))
3165 continue;
3166
3167 return __hpp_dimension__add(hd, list, level);
3168 }
3169
3170 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3171 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3172
3173 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3174 continue;
3175
3176 if (sort__mode != SORT_MODE__BRANCH)
3177 return -EINVAL;
3178
3179 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3180 list->sym = 1;
3181
3182 __sort_dimension__add(sd, list, level);
3183 return 0;
3184 }
3185
3186 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3187 struct sort_dimension *sd = &memory_sort_dimensions[i];
3188
3189 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3190 continue;
3191
3192 if (sort__mode != SORT_MODE__MEMORY)
3193 return -EINVAL;
3194
3195 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3196 return -EINVAL;
3197
3198 if (sd->entry == &sort_mem_daddr_sym)
3199 list->sym = 1;
3200
3201 __sort_dimension__add(sd, list, level);
3202 return 0;
3203 }
3204
3205 if (!add_dynamic_entry(evlist, tok, level))
3206 return 0;
3207
3208 return -ESRCH;
3209}
3210
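/*
 * Parse the sort key string. Tokens are separated by ',' or ' ', and
 * keys wrapped in '{...}' are placed on the same hierarchy level, e.g.
 * (illustrative) with "comm,{dso,symbol}" the dso and symbol keys share
 * one level while comm gets its own.
 */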
3211static int setup_sort_list(struct perf_hpp_list *list, char *str,
3212 struct evlist *evlist)
3213{
3214 char *tmp, *tok;
3215 int ret = 0;
3216 int level = 0;
3217 int next_level = 1;
3218 bool in_group = false;
3219
3220 do {
3221 tok = str;
3222 tmp = strpbrk(str, "{}, ");
3223 if (tmp) {
3224 if (in_group)
3225 next_level = level;
3226 else
3227 next_level = level + 1;
3228
3229 if (*tmp == '{')
3230 in_group = true;
3231 else if (*tmp == '}')
3232 in_group = false;
3233
3234 *tmp = '\0';
3235 str = tmp + 1;
3236 }
3237
3238 if (*tok) {
3239 ret = sort_dimension__add(list, tok, evlist, level);
3240 if (ret == -EINVAL) {
3241 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3242 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3243 else
3244 ui__error("Invalid --sort key: `%s'", tok);
3245 break;
3246 } else if (ret == -ESRCH) {
3247 ui__error("Unknown --sort key: `%s'", tok);
3248 break;
3249 }
3250 }
3251
3252 level = next_level;
3253 } while (tmp);
3254
3255 return ret;
3256}
3257
3258static const char *get_default_sort_order(struct evlist *evlist)
3259{
3260 const char *default_sort_orders[] = {
3261 default_sort_order,
3262 default_branch_sort_order,
3263 default_mem_sort_order,
3264 default_top_sort_order,
3265 default_diff_sort_order,
3266 default_tracepoint_sort_order,
3267 };
3268 bool use_trace = true;
3269 struct evsel *evsel;
3270
3271 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3272
3273 if (evlist == NULL || evlist__empty(evlist))
3274 goto out_no_evlist;
3275
3276 evlist__for_each_entry(evlist, evsel) {
3277 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3278 use_trace = false;
3279 break;
3280 }
3281 }
3282
3283 if (use_trace) {
3284 sort__mode = SORT_MODE__TRACEPOINT;
3285 if (symbol_conf.raw_trace)
3286 return "trace_fields";
3287 }
3288out_no_evlist:
3289 return default_sort_orders[sort__mode];
3290}
3291
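/*
 * Example (illustrative): in the default sort mode, "--sort +srcline"
 * expands sort_order to "comm,dso,symbol,srcline", i.e. the keys after
 * the '+' are appended to the default order instead of replacing it.
 */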
3292static int setup_sort_order(struct evlist *evlist)
3293{
3294 char *new_sort_order;
3295
	/*
	 * A '+'-prefixed sort order means: append the given keys to the
	 * default sort order string.
	 */
3300 if (!sort_order || is_strict_order(sort_order))
3301 return 0;
3302
3303 if (sort_order[1] == '\0') {
3304 ui__error("Invalid --sort key: `+'");
3305 return -EINVAL;
3306 }
3307
	/*
	 * We allocate a new sort_order string, but we never free it
	 * because it is referenced throughout the rest of the code.
	 */
3312 if (asprintf(&new_sort_order, "%s,%s",
3313 get_default_sort_order(evlist), sort_order + 1) < 0) {
3314 pr_err("Not enough memory to set up --sort");
3315 return -ENOMEM;
3316 }
3317
3318 sort_order = new_sort_order;
3319 return 0;
3320}
3321
/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
3326static char *prefix_if_not_in(const char *pre, char *str)
3327{
3328 char *n;
3329
3330 if (!str || strstr(str, pre))
3331 return str;
3332
3333 if (asprintf(&n, "%s,%s", pre, str) < 0)
3334 n = NULL;
3335
3336 free(str);
3337 return n;
3338}
3339
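/*
 * Example (illustrative): with the default keys "comm,dso,symbol" and
 * no strict field order, setup_overhead() yields
 * "overhead,comm,dso,symbol", with "overhead_children" prepended as
 * well when callchain accumulation is enabled.
 */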
3340static char *setup_overhead(char *keys)
3341{
3342 if (sort__mode == SORT_MODE__DIFF)
3343 return keys;
3344
3345 keys = prefix_if_not_in("overhead", keys);
3346
3347 if (symbol_conf.cumulate_callchain)
3348 keys = prefix_if_not_in("overhead_children", keys);
3349
3350 return keys;
3351}
3352
3353static int __setup_sorting(struct evlist *evlist)
3354{
3355 char *str;
3356 const char *sort_keys;
3357 int ret = 0;
3358
3359 ret = setup_sort_order(evlist);
3360 if (ret)
3361 return ret;
3362
3363 sort_keys = sort_order;
3364 if (sort_keys == NULL) {
3365 if (is_strict_order(field_order)) {
3366 /*
3367 * If user specified field order but no sort order,
			 * If the user specified a field order but no sort
			 * order, honor it and don't add the default sort
			 * orders.
3370 return 0;
3371 }
3372
3373 sort_keys = get_default_sort_order(evlist);
3374 }
3375
3376 str = strdup(sort_keys);
3377 if (str == NULL) {
3378 pr_err("Not enough memory to setup sort keys");
3379 return -ENOMEM;
3380 }
3381
3382 /*
3383 * Prepend overhead fields for backward compatibility.
3384 */
3385 if (!is_strict_order(field_order)) {
3386 str = setup_overhead(str);
3387 if (str == NULL) {
3388 pr_err("Not enough memory to setup overhead keys");
3389 return -ENOMEM;
3390 }
3391 }
3392
3393 ret = setup_sort_list(&perf_hpp_list, str, evlist);
3394
3395 free(str);
3396 return ret;
3397}
3398
3399void perf_hpp__set_elide(int idx, bool elide)
3400{
3401 struct perf_hpp_fmt *fmt;
3402 struct hpp_sort_entry *hse;
3403
3404 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3405 if (!perf_hpp__is_sort_entry(fmt))
3406 continue;
3407
3408 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3409 if (hse->se->se_width_idx == idx) {
3410 fmt->elide = elide;
3411 break;
3412 }
3413 }
3414}
3415
3416static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3417{
3418 if (list && strlist__nr_entries(list) == 1) {
3419 if (fp != NULL)
3420 fprintf(fp, "# %s: %s\n", list_name,
3421 strlist__entry(list, 0)->s);
3422 return true;
3423 }
3424 return false;
3425}
3426
3427static bool get_elide(int idx, FILE *output)
3428{
3429 switch (idx) {
3430 case HISTC_SYMBOL:
3431 return __get_elide(symbol_conf.sym_list, "symbol", output);
3432 case HISTC_DSO:
3433 return __get_elide(symbol_conf.dso_list, "dso", output);
3434 case HISTC_COMM:
3435 return __get_elide(symbol_conf.comm_list, "comm", output);
3436 default:
3437 break;
3438 }
3439
3440 if (sort__mode != SORT_MODE__BRANCH)
3441 return false;
3442
3443 switch (idx) {
3444 case HISTC_SYMBOL_FROM:
3445 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3446 case HISTC_SYMBOL_TO:
3447 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3448 case HISTC_DSO_FROM:
3449 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3450 case HISTC_DSO_TO:
3451 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3452 case HISTC_ADDR_FROM:
3453 return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3454 case HISTC_ADDR_TO:
3455 return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3456 default:
3457 break;
3458 }
3459
3460 return false;
3461}
3462
3463void sort__setup_elide(FILE *output)
3464{
3465 struct perf_hpp_fmt *fmt;
3466 struct hpp_sort_entry *hse;
3467
3468 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3469 if (!perf_hpp__is_sort_entry(fmt))
3470 continue;
3471
3472 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3473 fmt->elide = get_elide(hse->se->se_width_idx, output);
3474 }
3475
	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
3480 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3481 if (!perf_hpp__is_sort_entry(fmt))
3482 continue;
3483
3484 if (!fmt->elide)
3485 return;
3486 }
3487
3488 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3489 if (!perf_hpp__is_sort_entry(fmt))
3490 continue;
3491
3492 fmt->elide = false;
3493 }
3494}
3495
3496int output_field_add(struct perf_hpp_list *list, char *tok)
3497{
3498 unsigned int i;
3499
3500 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3501 struct sort_dimension *sd = &common_sort_dimensions[i];
3502
3503 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3504 continue;
3505
3506 return __sort_dimension__add_output(list, sd);
3507 }
3508
3509 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3510 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3511
3512 if (strncasecmp(tok, hd->name, strlen(tok)))
3513 continue;
3514
3515 return __hpp_dimension__add_output(list, hd);
3516 }
3517
3518 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3519 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3520
3521 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3522 continue;
3523
3524 if (sort__mode != SORT_MODE__BRANCH)
3525 return -EINVAL;
3526
3527 return __sort_dimension__add_output(list, sd);
3528 }
3529
3530 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3531 struct sort_dimension *sd = &memory_sort_dimensions[i];
3532
3533 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3534 continue;
3535
3536 if (sort__mode != SORT_MODE__MEMORY)
3537 return -EINVAL;
3538
3539 return __sort_dimension__add_output(list, sd);
3540 }
3541
3542 return -ESRCH;
3543}
3544
3545static int setup_output_list(struct perf_hpp_list *list, char *str)
3546{
3547 char *tmp, *tok;
3548 int ret = 0;
3549
3550 for (tok = strtok_r(str, ", ", &tmp);
3551 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3552 ret = output_field_add(list, tok);
3553 if (ret == -EINVAL) {
3554 ui__error("Invalid --fields key: `%s'", tok);
3555 break;
3556 } else if (ret == -ESRCH) {
3557 ui__error("Unknown --fields key: `%s'", tok);
3558 break;
3559 }
3560 }
3561
3562 return ret;
3563}
3564
3565void reset_dimensions(void)
3566{
3567 unsigned int i;
3568
3569 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3570 common_sort_dimensions[i].taken = 0;
3571
3572 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3573 hpp_sort_dimensions[i].taken = 0;
3574
3575 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3576 bstack_sort_dimensions[i].taken = 0;
3577
3578 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3579 memory_sort_dimensions[i].taken = 0;
3580}
3581
3582bool is_strict_order(const char *order)
3583{
3584 return order && (*order != '+');
3585}
3586
3587static int __setup_output_field(void)
3588{
3589 char *str, *strp;
3590 int ret = -EINVAL;
3591
3592 if (field_order == NULL)
3593 return 0;
3594
3595 strp = str = strdup(field_order);
3596 if (str == NULL) {
3597 pr_err("Not enough memory to setup output fields");
3598 return -ENOMEM;
3599 }
3600
3601 if (!is_strict_order(field_order))
3602 strp++;
3603
3604 if (!strlen(strp)) {
3605 ui__error("Invalid --fields key: `+'");
3606 goto out;
3607 }
3608
3609 ret = setup_output_list(&perf_hpp_list, strp);
3610
3611out:
3612 free(str);
3613 return ret;
3614}
3615
3616int setup_sorting(struct evlist *evlist)
3617{
3618 int err;
3619
3620 err = __setup_sorting(evlist);
3621 if (err < 0)
3622 return err;
3623
3624 if (parent_pattern != default_parent_pattern) {
3625 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3626 if (err < 0)
3627 return err;
3628 }
3629
3630 reset_dimensions();
3631
3632 /*
3633 * perf diff doesn't use default hpp output fields.
3634 */
3635 if (sort__mode != SORT_MODE__DIFF)
3636 perf_hpp__init();
3637
3638 err = __setup_output_field();
3639 if (err < 0)
3640 return err;
3641
3642 /* copy sort keys to output fields */
3643 perf_hpp__setup_output_field(&perf_hpp_list);
3644 /* and then copy output fields to sort keys */
3645 perf_hpp__append_sort_keys(&perf_hpp_list);
3646
3647 /* setup hists-specific output fields */
3648 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3649 return -1;
3650
3651 return 0;
3652}
3653
3654void reset_output_field(void)
3655{
3656 perf_hpp_list.need_collapse = 0;
3657 perf_hpp_list.parent = 0;
3658 perf_hpp_list.sym = 0;
3659 perf_hpp_list.dso = 0;
3660
3661 field_order = NULL;
3662 sort_order = NULL;
3663
3664 reset_dimensions();
3665 perf_hpp__reset_output_field(&perf_hpp_list);
3666}
3667
3668#define INDENT (3*8 + 1)
3669
3670static void add_key(struct strbuf *sb, const char *str, int *llen)
3671{
3672 if (!str)
3673 return;
3674
3675 if (*llen >= 75) {
3676 strbuf_addstr(sb, "\n\t\t\t ");
3677 *llen = INDENT;
3678 }
3679 strbuf_addf(sb, " %s", str);
3680 *llen += strlen(str) + 1;
3681}
3682
3683static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3684 int *llen)
3685{
3686 int i;
3687
3688 for (i = 0; i < n; i++)
3689 add_key(sb, s[i].name, llen);
3690}
3691
3692static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3693 int *llen)
3694{
3695 int i;
3696
3697 for (i = 0; i < n; i++)
3698 add_key(sb, s[i].name, llen);
3699}
3700
3701char *sort_help(const char *prefix)
3702{
3703 struct strbuf sb;
3704 char *s;
3705 int len = strlen(prefix) + INDENT;
3706
3707 strbuf_init(&sb, 300);
3708 strbuf_addstr(&sb, prefix);
3709 add_hpp_sort_string(&sb, hpp_sort_dimensions,
3710 ARRAY_SIZE(hpp_sort_dimensions), &len);
3711 add_sort_string(&sb, common_sort_dimensions,
3712 ARRAY_SIZE(common_sort_dimensions), &len);
3713 add_sort_string(&sb, bstack_sort_dimensions,
3714 ARRAY_SIZE(bstack_sort_dimensions), &len);
3715 add_sort_string(&sb, memory_sort_dimensions,
3716 ARRAY_SIZE(memory_sort_dimensions), &len);
3717 s = strbuf_detach(&sb, NULL);
3718 strbuf_release(&sb);
3719 return s;
3720}
1#include <sys/mman.h>
2#include "sort.h"
3#include "hist.h"
4#include "comm.h"
5#include "symbol.h"
6#include "evsel.h"
7#include "evlist.h"
8#include <traceevent/event-parse.h>
9#include "mem-events.h"
10
11regex_t parent_regex;
12const char default_parent_pattern[] = "^sys_|^do_page_fault";
13const char *parent_pattern = default_parent_pattern;
14const char default_sort_order[] = "comm,dso,symbol";
15const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
16const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
17const char default_top_sort_order[] = "dso,symbol";
18const char default_diff_sort_order[] = "dso,symbol";
19const char default_tracepoint_sort_order[] = "trace";
20const char *sort_order;
21const char *field_order;
22regex_t ignore_callees_regex;
23int have_ignore_callees = 0;
24int sort__need_collapse = 0;
25int sort__has_parent = 0;
26int sort__has_sym = 0;
27int sort__has_dso = 0;
28int sort__has_socket = 0;
29int sort__has_thread = 0;
30int sort__has_comm = 0;
31enum sort_mode sort__mode = SORT_MODE__NORMAL;
32
33/*
34 * Replaces all occurrences of a char used with the:
35 *
36 * -t, --field-separator
37 *
38 * option, that uses a special separator character and don't pad with spaces,
39 * replacing all occurances of this separator in symbol names (and other
40 * output) with a '.' character, that thus it's the only non valid separator.
41*/
42static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
43{
44 int n;
45 va_list ap;
46
47 va_start(ap, fmt);
48 n = vsnprintf(bf, size, fmt, ap);
49 if (symbol_conf.field_sep && n > 0) {
50 char *sep = bf;
51
52 while (1) {
53 sep = strchr(sep, *symbol_conf.field_sep);
54 if (sep == NULL)
55 break;
56 *sep = '.';
57 }
58 }
59 va_end(ap);
60
61 if (n >= (int)size)
62 return size - 1;
63 return n;
64}
65
66static int64_t cmp_null(const void *l, const void *r)
67{
68 if (!l && !r)
69 return 0;
70 else if (!l)
71 return -1;
72 else
73 return 1;
74}
75
76/* --sort pid */
77
78static int64_t
79sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
80{
81 return right->thread->tid - left->thread->tid;
82}
83
84static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
85 size_t size, unsigned int width)
86{
87 const char *comm = thread__comm_str(he->thread);
88
89 width = max(7U, width) - 6;
90 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
91 width, width, comm ?: "");
92}
93
94static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
95{
96 const struct thread *th = arg;
97
98 if (type != HIST_FILTER__THREAD)
99 return -1;
100
101 return th && he->thread != th;
102}
103
104struct sort_entry sort_thread = {
105 .se_header = " Pid:Command",
106 .se_cmp = sort__thread_cmp,
107 .se_snprintf = hist_entry__thread_snprintf,
108 .se_filter = hist_entry__thread_filter,
109 .se_width_idx = HISTC_THREAD,
110};
111
112/* --sort comm */
113
114static int64_t
115sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
116{
117 /* Compare the addr that should be unique among comm */
118 return strcmp(comm__str(right->comm), comm__str(left->comm));
119}
120
121static int64_t
122sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
123{
124 /* Compare the addr that should be unique among comm */
125 return strcmp(comm__str(right->comm), comm__str(left->comm));
126}
127
128static int64_t
129sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
130{
131 return strcmp(comm__str(right->comm), comm__str(left->comm));
132}
133
134static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
135 size_t size, unsigned int width)
136{
137 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
138}
139
140struct sort_entry sort_comm = {
141 .se_header = "Command",
142 .se_cmp = sort__comm_cmp,
143 .se_collapse = sort__comm_collapse,
144 .se_sort = sort__comm_sort,
145 .se_snprintf = hist_entry__comm_snprintf,
146 .se_filter = hist_entry__thread_filter,
147 .se_width_idx = HISTC_COMM,
148};
149
150/* --sort dso */
151
152static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
153{
154 struct dso *dso_l = map_l ? map_l->dso : NULL;
155 struct dso *dso_r = map_r ? map_r->dso : NULL;
156 const char *dso_name_l, *dso_name_r;
157
158 if (!dso_l || !dso_r)
159 return cmp_null(dso_r, dso_l);
160
161 if (verbose) {
162 dso_name_l = dso_l->long_name;
163 dso_name_r = dso_r->long_name;
164 } else {
165 dso_name_l = dso_l->short_name;
166 dso_name_r = dso_r->short_name;
167 }
168
169 return strcmp(dso_name_l, dso_name_r);
170}
171
172static int64_t
173sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
174{
175 return _sort__dso_cmp(right->ms.map, left->ms.map);
176}
177
178static int _hist_entry__dso_snprintf(struct map *map, char *bf,
179 size_t size, unsigned int width)
180{
181 if (map && map->dso) {
182 const char *dso_name = !verbose ? map->dso->short_name :
183 map->dso->long_name;
184 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
185 }
186
187 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
188}
189
190static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
191 size_t size, unsigned int width)
192{
193 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
194}
195
196static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
197{
198 const struct dso *dso = arg;
199
200 if (type != HIST_FILTER__DSO)
201 return -1;
202
203 return dso && (!he->ms.map || he->ms.map->dso != dso);
204}
205
206struct sort_entry sort_dso = {
207 .se_header = "Shared Object",
208 .se_cmp = sort__dso_cmp,
209 .se_snprintf = hist_entry__dso_snprintf,
210 .se_filter = hist_entry__dso_filter,
211 .se_width_idx = HISTC_DSO,
212};
213
214/* --sort symbol */
215
216static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
217{
218 return (int64_t)(right_ip - left_ip);
219}
220
221static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
222{
223 if (!sym_l || !sym_r)
224 return cmp_null(sym_l, sym_r);
225
226 if (sym_l == sym_r)
227 return 0;
228
229 if (sym_l->start != sym_r->start)
230 return (int64_t)(sym_r->start - sym_l->start);
231
232 return (int64_t)(sym_r->end - sym_l->end);
233}
234
235static int64_t
236sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
237{
238 int64_t ret;
239
240 if (!left->ms.sym && !right->ms.sym)
241 return _sort__addr_cmp(left->ip, right->ip);
242
243 /*
244 * comparing symbol address alone is not enough since it's a
245 * relative address within a dso.
246 */
247 if (!sort__has_dso) {
248 ret = sort__dso_cmp(left, right);
249 if (ret != 0)
250 return ret;
251 }
252
253 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
254}
255
256static int64_t
257sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
258{
259 if (!left->ms.sym || !right->ms.sym)
260 return cmp_null(left->ms.sym, right->ms.sym);
261
262 return strcmp(right->ms.sym->name, left->ms.sym->name);
263}
264
265static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
266 u64 ip, char level, char *bf, size_t size,
267 unsigned int width)
268{
269 size_t ret = 0;
270
271 if (verbose) {
272 char o = map ? dso__symtab_origin(map->dso) : '!';
273 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
274 BITS_PER_LONG / 4 + 2, ip, o);
275 }
276
277 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
278 if (sym && map) {
279 if (map->type == MAP__VARIABLE) {
280 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
281 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
282 ip - map->unmap_ip(map, sym->start));
283 } else {
284 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
285 width - ret,
286 sym->name);
287 }
288 } else {
289 size_t len = BITS_PER_LONG / 4;
290 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
291 len, ip);
292 }
293
294 return ret;
295}
296
297static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
298 size_t size, unsigned int width)
299{
300 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
301 he->level, bf, size, width);
302}
303
304static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
305{
306 const char *sym = arg;
307
308 if (type != HIST_FILTER__SYMBOL)
309 return -1;
310
311 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
312}
313
314struct sort_entry sort_sym = {
315 .se_header = "Symbol",
316 .se_cmp = sort__sym_cmp,
317 .se_sort = sort__sym_sort,
318 .se_snprintf = hist_entry__sym_snprintf,
319 .se_filter = hist_entry__sym_filter,
320 .se_width_idx = HISTC_SYMBOL,
321};
322
323/* --sort srcline */
324
325static char *hist_entry__get_srcline(struct hist_entry *he)
326{
327 struct map *map = he->ms.map;
328
329 if (!map)
330 return SRCLINE_UNKNOWN;
331
332 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
333 he->ms.sym, true);
334}
335
336static int64_t
337sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
338{
339 if (!left->srcline)
340 left->srcline = hist_entry__get_srcline(left);
341 if (!right->srcline)
342 right->srcline = hist_entry__get_srcline(right);
343
344 return strcmp(right->srcline, left->srcline);
345}
346
347static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
348 size_t size, unsigned int width)
349{
350 if (!he->srcline)
351 he->srcline = hist_entry__get_srcline(he);
352
353 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
354}
355
356struct sort_entry sort_srcline = {
357 .se_header = "Source:Line",
358 .se_cmp = sort__srcline_cmp,
359 .se_snprintf = hist_entry__srcline_snprintf,
360 .se_width_idx = HISTC_SRCLINE,
361};
362
363/* --sort srcfile */
364
365static char no_srcfile[1];
366
367static char *hist_entry__get_srcfile(struct hist_entry *e)
368{
369 char *sf, *p;
370 struct map *map = e->ms.map;
371
372 if (!map)
373 return no_srcfile;
374
375 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
376 e->ms.sym, false, true);
377 if (!strcmp(sf, SRCLINE_UNKNOWN))
378 return no_srcfile;
379 p = strchr(sf, ':');
380 if (p && *sf) {
381 *p = 0;
382 return sf;
383 }
384 free(sf);
385 return no_srcfile;
386}
387
388static int64_t
389sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
390{
391 if (!left->srcfile)
392 left->srcfile = hist_entry__get_srcfile(left);
393 if (!right->srcfile)
394 right->srcfile = hist_entry__get_srcfile(right);
395
396 return strcmp(right->srcfile, left->srcfile);
397}
398
399static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
400 size_t size, unsigned int width)
401{
402 if (!he->srcfile)
403 he->srcfile = hist_entry__get_srcfile(he);
404
405 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
406}
407
408struct sort_entry sort_srcfile = {
409 .se_header = "Source File",
410 .se_cmp = sort__srcfile_cmp,
411 .se_snprintf = hist_entry__srcfile_snprintf,
412 .se_width_idx = HISTC_SRCFILE,
413};
414
415/* --sort parent */
416
417static int64_t
418sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
419{
420 struct symbol *sym_l = left->parent;
421 struct symbol *sym_r = right->parent;
422
423 if (!sym_l || !sym_r)
424 return cmp_null(sym_l, sym_r);
425
426 return strcmp(sym_r->name, sym_l->name);
427}
428
429static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
430 size_t size, unsigned int width)
431{
432 return repsep_snprintf(bf, size, "%-*.*s", width, width,
433 he->parent ? he->parent->name : "[other]");
434}
435
436struct sort_entry sort_parent = {
437 .se_header = "Parent symbol",
438 .se_cmp = sort__parent_cmp,
439 .se_snprintf = hist_entry__parent_snprintf,
440 .se_width_idx = HISTC_PARENT,
441};
442
443/* --sort cpu */
444
445static int64_t
446sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
447{
448 return right->cpu - left->cpu;
449}
450
451static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
452 size_t size, unsigned int width)
453{
454 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
455}
456
457struct sort_entry sort_cpu = {
458 .se_header = "CPU",
459 .se_cmp = sort__cpu_cmp,
460 .se_snprintf = hist_entry__cpu_snprintf,
461 .se_width_idx = HISTC_CPU,
462};
463
464/* --sort socket */
465
466static int64_t
467sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
468{
469 return right->socket - left->socket;
470}
471
472static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
473 size_t size, unsigned int width)
474{
475 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
476}
477
478static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
479{
480 int sk = *(const int *)arg;
481
482 if (type != HIST_FILTER__SOCKET)
483 return -1;
484
485 return sk >= 0 && he->socket != sk;
486}
487
488struct sort_entry sort_socket = {
489 .se_header = "Socket",
490 .se_cmp = sort__socket_cmp,
491 .se_snprintf = hist_entry__socket_snprintf,
492 .se_filter = hist_entry__socket_filter,
493 .se_width_idx = HISTC_SOCKET,
494};
495
496/* --sort trace */
497
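/*
 * Format the tracepoint payload of a hist entry using libtraceevent:
 * with raw trace output enabled the raw field values are dumped,
 * otherwise the event's pretty-print handler is used. The returned
 * string is the malloc'ed trace_seq buffer and is owned by the caller.
 */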
498static char *get_trace_output(struct hist_entry *he)
499{
500 struct trace_seq seq;
501 struct perf_evsel *evsel;
502 struct pevent_record rec = {
503 .data = he->raw_data,
504 .size = he->raw_size,
505 };
506
507 evsel = hists_to_evsel(he->hists);
508
509 trace_seq_init(&seq);
510 if (symbol_conf.raw_trace) {
511 pevent_print_fields(&seq, he->raw_data, he->raw_size,
512 evsel->tp_format);
513 } else {
514 pevent_event_info(&seq, evsel->tp_format, &rec);
515 }
516 return seq.buffer;
517}
518
519static int64_t
520sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
521{
522 struct perf_evsel *evsel;
523
524 evsel = hists_to_evsel(left->hists);
525 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
526 return 0;
527
528 if (left->trace_output == NULL)
529 left->trace_output = get_trace_output(left);
530 if (right->trace_output == NULL)
531 right->trace_output = get_trace_output(right);
532
533 return strcmp(right->trace_output, left->trace_output);
534}
535
536static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
537 size_t size, unsigned int width)
538{
539 struct perf_evsel *evsel;
540
541 evsel = hists_to_evsel(he->hists);
542 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
543 return scnprintf(bf, size, "%-.*s", width, "N/A");
544
545 if (he->trace_output == NULL)
546 he->trace_output = get_trace_output(he);
547 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
548}
549
550struct sort_entry sort_trace = {
551 .se_header = "Trace output",
552 .se_cmp = sort__trace_cmp,
553 .se_snprintf = hist_entry__trace_snprintf,
554 .se_width_idx = HISTC_TRACE,
555};
556
557/* sort keys for branch stacks */
558
559static int64_t
560sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
561{
562 if (!left->branch_info || !right->branch_info)
563 return cmp_null(left->branch_info, right->branch_info);
564
565 return _sort__dso_cmp(left->branch_info->from.map,
566 right->branch_info->from.map);
567}
568
569static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
570 size_t size, unsigned int width)
571{
572 if (he->branch_info)
573 return _hist_entry__dso_snprintf(he->branch_info->from.map,
574 bf, size, width);
575 else
576 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
577}
578
579static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
580 const void *arg)
581{
582 const struct dso *dso = arg;
583
584 if (type != HIST_FILTER__DSO)
585 return -1;
586
587 return dso && (!he->branch_info || !he->branch_info->from.map ||
588 he->branch_info->from.map->dso != dso);
589}
590
591static int64_t
592sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
593{
594 if (!left->branch_info || !right->branch_info)
595 return cmp_null(left->branch_info, right->branch_info);
596
597 return _sort__dso_cmp(left->branch_info->to.map,
598 right->branch_info->to.map);
599}
600
601static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
602 size_t size, unsigned int width)
603{
604 if (he->branch_info)
605 return _hist_entry__dso_snprintf(he->branch_info->to.map,
606 bf, size, width);
607 else
608 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
609}
610
611static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
612 const void *arg)
613{
614 const struct dso *dso = arg;
615
616 if (type != HIST_FILTER__DSO)
617 return -1;
618
619 return dso && (!he->branch_info || !he->branch_info->to.map ||
620 he->branch_info->to.map->dso != dso);
621}
622
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}
640
641static int64_t
642sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
643{
644 struct addr_map_symbol *to_l, *to_r;
645
646 if (!left->branch_info || !right->branch_info)
647 return cmp_null(left->branch_info, right->branch_info);
648
649 to_l = &left->branch_info->to;
650 to_r = &right->branch_info->to;
651
652 if (!to_l->sym && !to_r->sym)
653 return _sort__addr_cmp(to_l->addr, to_r->addr);
654
655 return _sort__sym_cmp(to_l->sym, to_r->sym);
656}
657
658static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
659 size_t size, unsigned int width)
660{
661 if (he->branch_info) {
662 struct addr_map_symbol *from = &he->branch_info->from;
663
664 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
665 he->level, bf, size, width);
666 }
667
668 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
669}
670
671static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
672 size_t size, unsigned int width)
673{
674 if (he->branch_info) {
675 struct addr_map_symbol *to = &he->branch_info->to;
676
677 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
678 he->level, bf, size, width);
679 }
680
681 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
682}
683
684static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
685 const void *arg)
686{
687 const char *sym = arg;
688
689 if (type != HIST_FILTER__SYMBOL)
690 return -1;
691
692 return sym && !(he->branch_info && he->branch_info->from.sym &&
693 strstr(he->branch_info->from.sym->name, sym));
694}
695
696static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
697 const void *arg)
698{
699 const char *sym = arg;
700
701 if (type != HIST_FILTER__SYMBOL)
702 return -1;
703
704 return sym && !(he->branch_info && he->branch_info->to.sym &&
705 strstr(he->branch_info->to.sym->name, sym));
706}
707
708struct sort_entry sort_dso_from = {
709 .se_header = "Source Shared Object",
710 .se_cmp = sort__dso_from_cmp,
711 .se_snprintf = hist_entry__dso_from_snprintf,
712 .se_filter = hist_entry__dso_from_filter,
713 .se_width_idx = HISTC_DSO_FROM,
714};
715
716struct sort_entry sort_dso_to = {
717 .se_header = "Target Shared Object",
718 .se_cmp = sort__dso_to_cmp,
719 .se_snprintf = hist_entry__dso_to_snprintf,
720 .se_filter = hist_entry__dso_to_filter,
721 .se_width_idx = HISTC_DSO_TO,
722};
723
724struct sort_entry sort_sym_from = {
725 .se_header = "Source Symbol",
726 .se_cmp = sort__sym_from_cmp,
727 .se_snprintf = hist_entry__sym_from_snprintf,
728 .se_filter = hist_entry__sym_from_filter,
729 .se_width_idx = HISTC_SYMBOL_FROM,
730};
731
732struct sort_entry sort_sym_to = {
733 .se_header = "Target Symbol",
734 .se_cmp = sort__sym_to_cmp,
735 .se_snprintf = hist_entry__sym_to_snprintf,
736 .se_filter = hist_entry__sym_to_filter,
737 .se_width_idx = HISTC_SYMBOL_TO,
738};
739
740static int64_t
741sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
742{
743 unsigned char mp, p;
744
745 if (!left->branch_info || !right->branch_info)
746 return cmp_null(left->branch_info, right->branch_info);
747
748 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
749 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
750 return mp || p;
751}
752
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
755 static const char *out = "N/A";
756
757 if (he->branch_info) {
758 if (he->branch_info->flags.predicted)
759 out = "N";
760 else if (he->branch_info->flags.mispred)
761 out = "Y";
762 }
763
764 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
765}
766
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}
782
783struct sort_entry sort_cycles = {
784 .se_header = "Basic Block Cycles",
785 .se_cmp = sort__cycles_cmp,
786 .se_snprintf = hist_entry__cycles_snprintf,
787 .se_width_idx = HISTC_CYCLES,
788};
789
790/* --sort daddr_sym */
791static int64_t
792sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
793{
794 uint64_t l = 0, r = 0;
795
796 if (left->mem_info)
797 l = left->mem_info->daddr.addr;
798 if (right->mem_info)
799 r = right->mem_info->daddr.addr;
800
801 return (int64_t)(r - l);
802}
803
804static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
805 size_t size, unsigned int width)
806{
807 uint64_t addr = 0;
808 struct map *map = NULL;
809 struct symbol *sym = NULL;
810
811 if (he->mem_info) {
812 addr = he->mem_info->daddr.addr;
813 map = he->mem_info->daddr.map;
814 sym = he->mem_info->daddr.sym;
815 }
816 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
817 width);
818}
819
820static int64_t
821sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
822{
823 uint64_t l = 0, r = 0;
824
825 if (left->mem_info)
826 l = left->mem_info->iaddr.addr;
827 if (right->mem_info)
828 r = right->mem_info->iaddr.addr;
829
830 return (int64_t)(r - l);
831}
832
833static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
834 size_t size, unsigned int width)
835{
836 uint64_t addr = 0;
837 struct map *map = NULL;
838 struct symbol *sym = NULL;
839
840 if (he->mem_info) {
841 addr = he->mem_info->iaddr.addr;
842 map = he->mem_info->iaddr.map;
843 sym = he->mem_info->iaddr.sym;
844 }
845 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
846 width);
847}
848
849static int64_t
850sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
851{
852 struct map *map_l = NULL;
853 struct map *map_r = NULL;
854
855 if (left->mem_info)
856 map_l = left->mem_info->daddr.map;
857 if (right->mem_info)
858 map_r = right->mem_info->daddr.map;
859
860 return _sort__dso_cmp(map_l, map_r);
861}
862
863static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
864 size_t size, unsigned int width)
865{
866 struct map *map = NULL;
867
868 if (he->mem_info)
869 map = he->mem_info->daddr.map;
870
871 return _hist_entry__dso_snprintf(map, bf, size, width);
872}
873
874static int64_t
875sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
876{
877 union perf_mem_data_src data_src_l;
878 union perf_mem_data_src data_src_r;
879
880 if (left->mem_info)
881 data_src_l = left->mem_info->data_src;
882 else
883 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
884
885 if (right->mem_info)
886 data_src_r = right->mem_info->data_src;
887 else
888 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
889
890 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
891}
892
893static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
894 size_t size, unsigned int width)
895{
896 char out[10];
897
898 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
899 return repsep_snprintf(bf, size, "%.*s", width, out);
900}
901
902static int64_t
903sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
904{
905 union perf_mem_data_src data_src_l;
906 union perf_mem_data_src data_src_r;
907
908 if (left->mem_info)
909 data_src_l = left->mem_info->data_src;
910 else
911 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
912
913 if (right->mem_info)
914 data_src_r = right->mem_info->data_src;
915 else
916 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
917
918 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
919}
920
921static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
922 size_t size, unsigned int width)
923{
924 char out[64];
925
926 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
927 return repsep_snprintf(bf, size, "%-*s", width, out);
928}
929
930static int64_t
931sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
932{
933 union perf_mem_data_src data_src_l;
934 union perf_mem_data_src data_src_r;
935
936 if (left->mem_info)
937 data_src_l = left->mem_info->data_src;
938 else
939 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
940
941 if (right->mem_info)
942 data_src_r = right->mem_info->data_src;
943 else
944 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
945
946 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
947}
948
949static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
950 size_t size, unsigned int width)
951{
952 char out[64];
953
954 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
955 return repsep_snprintf(bf, size, "%-*s", width, out);
956}
957
958static int64_t
959sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
960{
961 union perf_mem_data_src data_src_l;
962 union perf_mem_data_src data_src_r;
963
964 if (left->mem_info)
965 data_src_l = left->mem_info->data_src;
966 else
967 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
968
969 if (right->mem_info)
970 data_src_r = right->mem_info->data_src;
971 else
972 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
973
974 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
975}
976
977static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
978 size_t size, unsigned int width)
979{
980 char out[64];
981
982 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
983 return repsep_snprintf(bf, size, "%-*s", width, out);
984}
985
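/*
 * Group samples that touch the same data cacheline. Entries are ordered
 * by cpumode, then by the backing map's device/inode identity, then (for
 * anonymous userspace mappings) by pid, and finally by the cacheline
 * address derived from al_addr.
 */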
986static int64_t
987sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
988{
989 u64 l, r;
990 struct map *l_map, *r_map;
991
992 if (!left->mem_info) return -1;
993 if (!right->mem_info) return 1;
994
995 /* group event types together */
996 if (left->cpumode > right->cpumode) return -1;
997 if (left->cpumode < right->cpumode) return 1;
998
999 l_map = left->mem_info->daddr.map;
1000 r_map = right->mem_info->daddr.map;
1001
1002 /* if both are NULL, jump to sort on al_addr instead */
1003 if (!l_map && !r_map)
1004 goto addr;
1005
1006 if (!l_map) return -1;
1007 if (!r_map) return 1;
1008
1009 if (l_map->maj > r_map->maj) return -1;
1010 if (l_map->maj < r_map->maj) return 1;
1011
1012 if (l_map->min > r_map->min) return -1;
1013 if (l_map->min < r_map->min) return 1;
1014
1015 if (l_map->ino > r_map->ino) return -1;
1016 if (l_map->ino < r_map->ino) return 1;
1017
1018 if (l_map->ino_generation > r_map->ino_generation) return -1;
1019 if (l_map->ino_generation < r_map->ino_generation) return 1;
1020
1021 /*
1022 * Addresses with no major/minor numbers are assumed to be
1023 * anonymous in userspace. Sort those on pid then address.
1024 *
1025 * The kernel and non-zero major/minor mapped areas are
1026 * assumed to be unity mapped. Sort those on address.
1027 */
1028
1029 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1030 (!(l_map->flags & MAP_SHARED)) &&
1031 !l_map->maj && !l_map->min && !l_map->ino &&
1032 !l_map->ino_generation) {
1033 /* userspace anonymous */
1034
1035 if (left->thread->pid_ > right->thread->pid_) return -1;
1036 if (left->thread->pid_ < right->thread->pid_) return 1;
1037 }
1038
1039addr:
1040 /* al_addr does all the right addr - start + offset calculations */
1041 l = cl_address(left->mem_info->daddr.al_addr);
1042 r = cl_address(right->mem_info->daddr.al_addr);
1043
1044 if (l > r) return -1;
1045 if (l < r) return 1;
1046
1047 return 0;
1048}
1049
1050static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1051 size_t size, unsigned int width)
1052{
1053
1054 uint64_t addr = 0;
1055 struct map *map = NULL;
1056 struct symbol *sym = NULL;
1057 char level = he->level;
1058
1059 if (he->mem_info) {
1060 addr = cl_address(he->mem_info->daddr.al_addr);
1061 map = he->mem_info->daddr.map;
1062 sym = he->mem_info->daddr.sym;
1063
1064 /* print [s] for shared data mmaps */
1065 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1066 map && (map->type == MAP__VARIABLE) &&
1067 (map->flags & MAP_SHARED) &&
1068 (map->maj || map->min || map->ino ||
1069 map->ino_generation))
1070 level = 's';
1071 else if (!map)
1072 level = 'X';
1073 }
1074 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1075 width);
1076}
1077
1078struct sort_entry sort_mispredict = {
1079 .se_header = "Branch Mispredicted",
1080 .se_cmp = sort__mispredict_cmp,
1081 .se_snprintf = hist_entry__mispredict_snprintf,
1082 .se_width_idx = HISTC_MISPREDICT,
1083};
1084
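/* Average sample weight per event, used by the local_weight sort key. */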
1085static u64 he_weight(struct hist_entry *he)
1086{
1087 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1088}
1089
1090static int64_t
1091sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1092{
1093 return he_weight(left) - he_weight(right);
1094}
1095
1096static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1097 size_t size, unsigned int width)
1098{
1099 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1100}
1101
1102struct sort_entry sort_local_weight = {
1103 .se_header = "Local Weight",
1104 .se_cmp = sort__local_weight_cmp,
1105 .se_snprintf = hist_entry__local_weight_snprintf,
1106 .se_width_idx = HISTC_LOCAL_WEIGHT,
1107};
1108
1109static int64_t
1110sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1111{
1112 return left->stat.weight - right->stat.weight;
1113}
1114
1115static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1116 size_t size, unsigned int width)
1117{
1118 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1119}
1120
1121struct sort_entry sort_global_weight = {
1122 .se_header = "Weight",
1123 .se_cmp = sort__global_weight_cmp,
1124 .se_snprintf = hist_entry__global_weight_snprintf,
1125 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1126};
1127
1128struct sort_entry sort_mem_daddr_sym = {
1129 .se_header = "Data Symbol",
1130 .se_cmp = sort__daddr_cmp,
1131 .se_snprintf = hist_entry__daddr_snprintf,
1132 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1133};
1134
1135struct sort_entry sort_mem_iaddr_sym = {
1136 .se_header = "Code Symbol",
1137 .se_cmp = sort__iaddr_cmp,
1138 .se_snprintf = hist_entry__iaddr_snprintf,
1139 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1140};
1141
1142struct sort_entry sort_mem_daddr_dso = {
1143 .se_header = "Data Object",
1144 .se_cmp = sort__dso_daddr_cmp,
1145 .se_snprintf = hist_entry__dso_daddr_snprintf,
1146 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1147};
1148
1149struct sort_entry sort_mem_locked = {
1150 .se_header = "Locked",
1151 .se_cmp = sort__locked_cmp,
1152 .se_snprintf = hist_entry__locked_snprintf,
1153 .se_width_idx = HISTC_MEM_LOCKED,
1154};
1155
1156struct sort_entry sort_mem_tlb = {
1157 .se_header = "TLB access",
1158 .se_cmp = sort__tlb_cmp,
1159 .se_snprintf = hist_entry__tlb_snprintf,
1160 .se_width_idx = HISTC_MEM_TLB,
1161};
1162
1163struct sort_entry sort_mem_lvl = {
1164 .se_header = "Memory access",
1165 .se_cmp = sort__lvl_cmp,
1166 .se_snprintf = hist_entry__lvl_snprintf,
1167 .se_width_idx = HISTC_MEM_LVL,
1168};
1169
1170struct sort_entry sort_mem_snoop = {
1171 .se_header = "Snoop",
1172 .se_cmp = sort__snoop_cmp,
1173 .se_snprintf = hist_entry__snoop_snprintf,
1174 .se_width_idx = HISTC_MEM_SNOOP,
1175};
1176
1177struct sort_entry sort_mem_dcacheline = {
1178 .se_header = "Data Cacheline",
1179 .se_cmp = sort__dcacheline_cmp,
1180 .se_snprintf = hist_entry__dcacheline_snprintf,
1181 .se_width_idx = HISTC_MEM_DCACHELINE,
1182};
1183
1184static int64_t
1185sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1186{
1187 if (!left->branch_info || !right->branch_info)
1188 return cmp_null(left->branch_info, right->branch_info);
1189
1190 return left->branch_info->flags.abort !=
1191 right->branch_info->flags.abort;
1192}
1193
1194static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1195 size_t size, unsigned int width)
1196{
1197 static const char *out = "N/A";
1198
1199 if (he->branch_info) {
1200 if (he->branch_info->flags.abort)
1201 out = "A";
1202 else
1203 out = ".";
1204 }
1205
1206 return repsep_snprintf(bf, size, "%-*s", width, out);
1207}
1208
1209struct sort_entry sort_abort = {
1210 .se_header = "Transaction abort",
1211 .se_cmp = sort__abort_cmp,
1212 .se_snprintf = hist_entry__abort_snprintf,
1213 .se_width_idx = HISTC_ABORT,
1214};
1215
1216static int64_t
1217sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1218{
1219 if (!left->branch_info || !right->branch_info)
1220 return cmp_null(left->branch_info, right->branch_info);
1221
1222 return left->branch_info->flags.in_tx !=
1223 right->branch_info->flags.in_tx;
1224}
1225
1226static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1227 size_t size, unsigned int width)
1228{
1229 static const char *out = "N/A";
1230
1231 if (he->branch_info) {
1232 if (he->branch_info->flags.in_tx)
1233 out = "T";
1234 else
1235 out = ".";
1236 }
1237
1238 return repsep_snprintf(bf, size, "%-*s", width, out);
1239}
1240
1241struct sort_entry sort_in_tx = {
1242 .se_header = "Branch in transaction",
1243 .se_cmp = sort__in_tx_cmp,
1244 .se_snprintf = hist_entry__in_tx_snprintf,
1245 .se_width_idx = HISTC_IN_TX,
1246};
1247
1248static int64_t
1249sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1250{
1251 return left->transaction - right->transaction;
1252}
1253
1254static inline char *add_str(char *p, const char *str)
1255{
1256 strcpy(p, str);
1257 return p + strlen(str);
1258}
1259
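/*
 * Transaction flag bits and the names used to render them. Entries with
 * skip_for_len set are ignored by hist_entry__transaction_len() when
 * sizing the column.
 */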
1260static struct txbit {
1261 unsigned flag;
1262 const char *name;
1263 int skip_for_len;
1264} txbits[] = {
1265 { PERF_TXN_ELISION, "EL ", 0 },
1266 { PERF_TXN_TRANSACTION, "TX ", 1 },
1267 { PERF_TXN_SYNC, "SYNC ", 1 },
1268 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1269 { PERF_TXN_RETRY, "RETRY ", 0 },
1270 { PERF_TXN_CONFLICT, "CON ", 0 },
1271 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1272 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1273 { 0, NULL, 0 }
1274};
1275
1276int hist_entry__transaction_len(void)
1277{
1278 int i;
1279 int len = 0;
1280
1281 for (i = 0; txbits[i].name; i++) {
1282 if (!txbits[i].skip_for_len)
1283 len += strlen(txbits[i].name);
1284 }
1285 len += 4; /* :XX<space> */
1286 return len;
1287}
1288
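/*
 * Render the transaction word as the space-separated flag names from
 * txbits[], followed by the abort code (when set) as ":<hex>". For
 * example, a sample with PERF_TXN_TRANSACTION and PERF_TXN_SYNC set is
 * rendered as "TX SYNC ".
 */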
1289static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1290 size_t size, unsigned int width)
1291{
1292 u64 t = he->transaction;
1293 char buf[128];
1294 char *p = buf;
1295 int i;
1296
1297 buf[0] = 0;
1298 for (i = 0; txbits[i].name; i++)
1299 if (txbits[i].flag & t)
1300 p = add_str(p, txbits[i].name);
1301 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1302 p = add_str(p, "NEITHER ");
1303 if (t & PERF_TXN_ABORT_MASK) {
1304 sprintf(p, ":%" PRIx64,
1305 (t & PERF_TXN_ABORT_MASK) >>
1306 PERF_TXN_ABORT_SHIFT);
1307 p += strlen(p);
1308 }
1309
1310 return repsep_snprintf(bf, size, "%-*s", width, buf);
1311}
1312
1313struct sort_entry sort_transaction = {
1314 .se_header = "Transaction ",
1315 .se_cmp = sort__transaction_cmp,
1316 .se_snprintf = hist_entry__transaction_snprintf,
1317 .se_width_idx = HISTC_TRANSACTION,
1318};
1319
1320struct sort_dimension {
1321 const char *name;
1322 struct sort_entry *entry;
1323 int taken;
1324};
1325
1326#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1327
1328static struct sort_dimension common_sort_dimensions[] = {
1329 DIM(SORT_PID, "pid", sort_thread),
1330 DIM(SORT_COMM, "comm", sort_comm),
1331 DIM(SORT_DSO, "dso", sort_dso),
1332 DIM(SORT_SYM, "symbol", sort_sym),
1333 DIM(SORT_PARENT, "parent", sort_parent),
1334 DIM(SORT_CPU, "cpu", sort_cpu),
1335 DIM(SORT_SOCKET, "socket", sort_socket),
1336 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1337 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1338 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1339 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1340 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1341 DIM(SORT_TRACE, "trace", sort_trace),
1342};
1343
1344#undef DIM
1345
1346#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1347
1348static struct sort_dimension bstack_sort_dimensions[] = {
1349 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1350 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1351 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1352 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1353 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1354 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1355 DIM(SORT_ABORT, "abort", sort_abort),
1356 DIM(SORT_CYCLES, "cycles", sort_cycles),
1357};
1358
1359#undef DIM
1360
1361#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1362
1363static struct sort_dimension memory_sort_dimensions[] = {
1364 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1365 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1366 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1367 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1368 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1369 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1370 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1371 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1372};
1373
1374#undef DIM
1375
1376struct hpp_dimension {
1377 const char *name;
1378 struct perf_hpp_fmt *fmt;
1379 int taken;
1380};
1381
1382#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1383
1384static struct hpp_dimension hpp_sort_dimensions[] = {
1385 DIM(PERF_HPP__OVERHEAD, "overhead"),
1386 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1387 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1388 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1389 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1390 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1391 DIM(PERF_HPP__SAMPLES, "sample"),
1392 DIM(PERF_HPP__PERIOD, "period"),
1393};
1394
1395#undef DIM
1396
1397struct hpp_sort_entry {
1398 struct perf_hpp_fmt hpp;
1399 struct sort_entry *se;
1400};
1401
1402void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1403{
1404 struct hpp_sort_entry *hse;
1405
1406 if (!perf_hpp__is_sort_entry(fmt))
1407 return;
1408
1409 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1410 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1411}
1412
1413static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1414 struct perf_evsel *evsel)
1415{
1416 struct hpp_sort_entry *hse;
1417 size_t len = fmt->user_len;
1418
1419 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1420
1421 if (!len)
1422 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1423
1424 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1425}
1426
1427static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1428 struct perf_hpp *hpp __maybe_unused,
1429 struct perf_evsel *evsel)
1430{
1431 struct hpp_sort_entry *hse;
1432 size_t len = fmt->user_len;
1433
1434 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1435
1436 if (!len)
1437 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1438
1439 return len;
1440}
1441
1442static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1443 struct hist_entry *he)
1444{
1445 struct hpp_sort_entry *hse;
1446 size_t len = fmt->user_len;
1447
1448 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1449
1450 if (!len)
1451 len = hists__col_len(he->hists, hse->se->se_width_idx);
1452
1453 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1454}
1455
1456static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1457 struct hist_entry *a, struct hist_entry *b)
1458{
1459 struct hpp_sort_entry *hse;
1460
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1462 return hse->se->se_cmp(a, b);
1463}
1464
1465static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1466 struct hist_entry *a, struct hist_entry *b)
1467{
1468 struct hpp_sort_entry *hse;
1469 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1470
1471 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1472 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1473 return collapse_fn(a, b);
1474}
1475
1476static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1477 struct hist_entry *a, struct hist_entry *b)
1478{
1479 struct hpp_sort_entry *hse;
1480 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1481
1482 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1483 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1484 return sort_fn(a, b);
1485}
1486
1487bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1488{
1489 return format->header == __sort__hpp_header;
1490}
1491
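/*
 * Generate perf_hpp__is_<key>_entry() helpers that test whether a format
 * is the hpp wrapper around the corresponding sort_<key> entry.
 */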
1492#define MK_SORT_ENTRY_CHK(key) \
1493bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1494{ \
1495 struct hpp_sort_entry *hse; \
1496 \
1497 if (!perf_hpp__is_sort_entry(fmt)) \
1498 return false; \
1499 \
1500 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1501 return hse->se == &sort_ ## key ; \
1502}
1503
1504MK_SORT_ENTRY_CHK(trace)
1505MK_SORT_ENTRY_CHK(srcline)
1506MK_SORT_ENTRY_CHK(srcfile)
1507MK_SORT_ENTRY_CHK(thread)
1508MK_SORT_ENTRY_CHK(comm)
1509MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

1513static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1514{
1515 struct hpp_sort_entry *hse_a;
1516 struct hpp_sort_entry *hse_b;
1517
1518 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1519 return false;
1520
1521 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1522 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1523
1524 return hse_a->se == hse_b->se;
1525}
1526
1527static void hse_free(struct perf_hpp_fmt *fmt)
1528{
1529 struct hpp_sort_entry *hse;
1530
1531 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1532 free(hse);
1533}
1534
1535static struct hpp_sort_entry *
1536__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1537{
1538 struct hpp_sort_entry *hse;
1539
1540 hse = malloc(sizeof(*hse));
1541 if (hse == NULL) {
1542 pr_err("Memory allocation failed\n");
1543 return NULL;
1544 }
1545
1546 hse->se = sd->entry;
1547 hse->hpp.name = sd->entry->se_header;
1548 hse->hpp.header = __sort__hpp_header;
1549 hse->hpp.width = __sort__hpp_width;
1550 hse->hpp.entry = __sort__hpp_entry;
1551 hse->hpp.color = NULL;
1552
1553 hse->hpp.cmp = __sort__hpp_cmp;
1554 hse->hpp.collapse = __sort__hpp_collapse;
1555 hse->hpp.sort = __sort__hpp_sort;
1556 hse->hpp.equal = __sort__hpp_equal;
1557 hse->hpp.free = hse_free;
1558
1559 INIT_LIST_HEAD(&hse->hpp.list);
1560 INIT_LIST_HEAD(&hse->hpp.sort_list);
1561 hse->hpp.elide = false;
1562 hse->hpp.len = 0;
1563 hse->hpp.user_len = 0;
1564 hse->hpp.level = level;
1565
1566 return hse;
1567}
1568
1569static void hpp_free(struct perf_hpp_fmt *fmt)
1570{
1571 free(fmt);
1572}
1573
1574static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1575 int level)
1576{
1577 struct perf_hpp_fmt *fmt;
1578
1579 fmt = memdup(hd->fmt, sizeof(*fmt));
1580 if (fmt) {
1581 INIT_LIST_HEAD(&fmt->list);
1582 INIT_LIST_HEAD(&fmt->sort_list);
1583 fmt->free = hpp_free;
1584 fmt->level = level;
1585 }
1586
1587 return fmt;
1588}
1589
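/*
 * Run all applicable se_filter callbacks for the given filter type.
 * Returns -1 if no sort key handles this type, 0 if the entry passes
 * every matching filter, and a positive value if it is filtered out.
 */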
1590int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1591{
1592 struct perf_hpp_fmt *fmt;
1593 struct hpp_sort_entry *hse;
1594 int ret = -1;
1595 int r;
1596
1597 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1598 if (!perf_hpp__is_sort_entry(fmt))
1599 continue;
1600
1601 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1602 if (hse->se->se_filter == NULL)
1603 continue;
1604
		/*
		 * A hist entry is filtered out if any sort key in the hpp
		 * list filters it, but filter types that don't match the
		 * requested type are skipped.
		 */
1609 r = hse->se->se_filter(he, type, arg);
1610 if (r >= 0) {
1611 if (ret < 0)
1612 ret = 0;
1613 ret |= r;
1614 }
1615 }
1616
1617 return ret;
1618}
1619
1620static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1621 struct perf_hpp_list *list,
1622 int level)
1623{
1624 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1625
1626 if (hse == NULL)
1627 return -1;
1628
1629 perf_hpp_list__register_sort_field(list, &hse->hpp);
1630 return 0;
1631}
1632
1633static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1634 struct perf_hpp_list *list)
1635{
1636 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1637
1638 if (hse == NULL)
1639 return -1;
1640
1641 perf_hpp_list__column_register(list, &hse->hpp);
1642 return 0;
1643}
1644
1645struct hpp_dynamic_entry {
1646 struct perf_hpp_fmt hpp;
1647 struct perf_evsel *evsel;
1648 struct format_field *field;
1649 unsigned dynamic_len;
1650 bool raw_trace;
1651};
1652
1653static int hde_width(struct hpp_dynamic_entry *hde)
1654{
1655 if (!hde->hpp.len) {
1656 int len = hde->dynamic_len;
1657 int namelen = strlen(hde->field->name);
1658 int fieldlen = hde->field->size;
1659
1660 if (namelen > len)
1661 len = namelen;
1662
1663 if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* width to print the value in hex: "0x" plus two digits per byte */
1665 fieldlen = hde->field->size * 2 + 2;
1666 }
1667 if (fieldlen > len)
1668 len = fieldlen;
1669
1670 hde->hpp.len = len;
1671 }
1672 return hde->hpp.len;
1673}
1674
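/*
 * Scan the pretty-printed trace output, which is assumed to consist of
 * space-separated "name=value" items, and track the widest value seen
 * for this field so the column can be sized to fit.
 */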
1675static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1676 struct hist_entry *he)
1677{
1678 char *str, *pos;
1679 struct format_field *field = hde->field;
1680 size_t namelen;
1681 bool last = false;
1682
1683 if (hde->raw_trace)
1684 return;
1685
1686 /* parse pretty print result and update max length */
1687 if (!he->trace_output)
1688 he->trace_output = get_trace_output(he);
1689
1690 namelen = strlen(field->name);
1691 str = he->trace_output;
1692
1693 while (str) {
1694 pos = strchr(str, ' ');
1695 if (pos == NULL) {
1696 last = true;
1697 pos = str + strlen(str);
1698 }
1699
1700 if (!strncmp(str, field->name, namelen)) {
1701 size_t len;
1702
1703 str += namelen + 1;
1704 len = pos - str;
1705
1706 if (len > hde->dynamic_len)
1707 hde->dynamic_len = len;
1708 break;
1709 }
1710
1711 if (last)
1712 str = NULL;
1713 else
1714 str = pos + 1;
1715 }
1716}
1717
1718static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1719 struct perf_evsel *evsel __maybe_unused)
1720{
1721 struct hpp_dynamic_entry *hde;
1722 size_t len = fmt->user_len;
1723
1724 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1725
1726 if (!len)
1727 len = hde_width(hde);
1728
1729 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1730}
1731
1732static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1733 struct perf_hpp *hpp __maybe_unused,
1734 struct perf_evsel *evsel __maybe_unused)
1735{
1736 struct hpp_dynamic_entry *hde;
1737 size_t len = fmt->user_len;
1738
1739 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1740
1741 if (!len)
1742 len = hde_width(hde);
1743
1744 return len;
1745}
1746
1747bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1748{
1749 struct hpp_dynamic_entry *hde;
1750
1751 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1752
1753 return hists_to_evsel(hists) == hde->evsel;
1754}
1755
1756static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1757 struct hist_entry *he)
1758{
1759 struct hpp_dynamic_entry *hde;
1760 size_t len = fmt->user_len;
1761 char *str, *pos;
1762 struct format_field *field;
1763 size_t namelen;
1764 bool last = false;
1765 int ret;
1766
1767 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1768
1769 if (!len)
1770 len = hde_width(hde);
1771
1772 if (hde->raw_trace)
1773 goto raw_field;
1774
1775 if (!he->trace_output)
1776 he->trace_output = get_trace_output(he);
1777
1778 field = hde->field;
1779 namelen = strlen(field->name);
1780 str = he->trace_output;
1781
1782 while (str) {
1783 pos = strchr(str, ' ');
1784 if (pos == NULL) {
1785 last = true;
1786 pos = str + strlen(str);
1787 }
1788
1789 if (!strncmp(str, field->name, namelen)) {
1790 str += namelen + 1;
1791 str = strndup(str, pos - str);
1792
1793 if (str == NULL)
1794 return scnprintf(hpp->buf, hpp->size,
1795 "%*.*s", len, len, "ERROR");
1796 break;
1797 }
1798
1799 if (last)
1800 str = NULL;
1801 else
1802 str = pos + 1;
1803 }
1804
1805 if (str == NULL) {
1806 struct trace_seq seq;
1807raw_field:
1808 trace_seq_init(&seq);
1809 pevent_print_field(&seq, he->raw_data, hde->field);
1810 str = seq.buffer;
1811 }
1812
1813 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1814 free(str);
1815 return ret;
1816}
1817
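/*
 * Compare two entries on the raw value of the dynamic field. For
 * FIELD_IS_DYNAMIC fields the stored descriptor packs the payload offset
 * in the low 16 bits and its size in the high 16 bits. When called with
 * b == NULL it only updates the dynamic column width for 'a'.
 */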
1818static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1819 struct hist_entry *a, struct hist_entry *b)
1820{
1821 struct hpp_dynamic_entry *hde;
1822 struct format_field *field;
1823 unsigned offset, size;
1824
1825 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1826
1827 if (b == NULL) {
1828 update_dynamic_len(hde, a);
1829 return 0;
1830 }
1831
1832 field = hde->field;
1833 if (field->flags & FIELD_IS_DYNAMIC) {
1834 unsigned long long dyn;
1835
1836 pevent_read_number_field(field, a->raw_data, &dyn);
1837 offset = dyn & 0xffff;
1838 size = (dyn >> 16) & 0xffff;
1839
1840 /* record max width for output */
1841 if (size > hde->dynamic_len)
1842 hde->dynamic_len = size;
1843 } else {
1844 offset = field->offset;
1845 size = field->size;
1846 }
1847
1848 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1849}
1850
1851bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1852{
1853 return fmt->cmp == __sort__hde_cmp;
1854}
1855
1856static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1857{
1858 struct hpp_dynamic_entry *hde_a;
1859 struct hpp_dynamic_entry *hde_b;
1860
1861 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
1862 return false;
1863
1864 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
1865 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
1866
1867 return hde_a->field == hde_b->field;
1868}
1869
1870static void hde_free(struct perf_hpp_fmt *fmt)
1871{
1872 struct hpp_dynamic_entry *hde;
1873
1874 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1875 free(hde);
1876}
1877
1878static struct hpp_dynamic_entry *
1879__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
1880 int level)
1881{
1882 struct hpp_dynamic_entry *hde;
1883
1884 hde = malloc(sizeof(*hde));
1885 if (hde == NULL) {
1886 pr_debug("Memory allocation failed\n");
1887 return NULL;
1888 }
1889
1890 hde->evsel = evsel;
1891 hde->field = field;
1892 hde->dynamic_len = 0;
1893
1894 hde->hpp.name = field->name;
1895 hde->hpp.header = __sort__hde_header;
1896 hde->hpp.width = __sort__hde_width;
1897 hde->hpp.entry = __sort__hde_entry;
1898 hde->hpp.color = NULL;
1899
1900 hde->hpp.cmp = __sort__hde_cmp;
1901 hde->hpp.collapse = __sort__hde_cmp;
1902 hde->hpp.sort = __sort__hde_cmp;
1903 hde->hpp.equal = __sort__hde_equal;
1904 hde->hpp.free = hde_free;
1905
1906 INIT_LIST_HEAD(&hde->hpp.list);
1907 INIT_LIST_HEAD(&hde->hpp.sort_list);
1908 hde->hpp.elide = false;
1909 hde->hpp.len = 0;
1910 hde->hpp.user_len = 0;
1911 hde->hpp.level = level;
1912
1913 return hde;
1914}
1915
1916struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
1917{
1918 struct perf_hpp_fmt *new_fmt = NULL;
1919
1920 if (perf_hpp__is_sort_entry(fmt)) {
1921 struct hpp_sort_entry *hse, *new_hse;
1922
1923 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1924 new_hse = memdup(hse, sizeof(*hse));
1925 if (new_hse)
1926 new_fmt = &new_hse->hpp;
1927 } else if (perf_hpp__is_dynamic_entry(fmt)) {
1928 struct hpp_dynamic_entry *hde, *new_hde;
1929
1930 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1931 new_hde = memdup(hde, sizeof(*hde));
1932 if (new_hde)
1933 new_fmt = &new_hde->hpp;
1934 } else {
1935 new_fmt = memdup(fmt, sizeof(*fmt));
1936 }
1937
1938 INIT_LIST_HEAD(&new_fmt->list);
1939 INIT_LIST_HEAD(&new_fmt->sort_list);
1940
1941 return new_fmt;
1942}
1943
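/*
 * Split a dynamic sort key of the form '[<event>.]<field>[/<option>]'
 * in place. *event and *opt are set to NULL when the corresponding part
 * is absent.
 */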
1944static int parse_field_name(char *str, char **event, char **field, char **opt)
1945{
1946 char *event_name, *field_name, *opt_name;
1947
1948 event_name = str;
1949 field_name = strchr(str, '.');
1950
1951 if (field_name) {
1952 *field_name++ = '\0';
1953 } else {
1954 event_name = NULL;
1955 field_name = str;
1956 }
1957
1958 opt_name = strchr(field_name, '/');
1959 if (opt_name)
1960 *opt_name++ = '\0';
1961
1962 *event = event_name;
1963 *field = field_name;
1964 *opt = opt_name;
1965
1966 return 0;
1967}
1968
/*
 * Find the matching evsel using a given event name. The event name can be:
 * 1. '%' + event index (e.g. '%1' for the first event)
 * 2. full event name (e.g. sched:sched_switch)
 * 3. partial event name (should not contain ':')
 */
1974static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1975{
1976 struct perf_evsel *evsel = NULL;
1977 struct perf_evsel *pos;
1978 bool full_name;
1979
1980 /* case 1 */
1981 if (event_name[0] == '%') {
1982 int nr = strtol(event_name+1, NULL, 0);
1983
1984 if (nr > evlist->nr_entries)
1985 return NULL;
1986
1987 evsel = perf_evlist__first(evlist);
1988 while (--nr > 0)
1989 evsel = perf_evsel__next(evsel);
1990
1991 return evsel;
1992 }
1993
1994 full_name = !!strchr(event_name, ':');
1995 evlist__for_each(evlist, pos) {
1996 /* case 2 */
1997 if (full_name && !strcmp(pos->name, event_name))
1998 return pos;
1999 /* case 3 */
2000 if (!full_name && strstr(pos->name, event_name)) {
2001 if (evsel) {
2002 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2003 event_name, evsel->name, pos->name);
2004 return NULL;
2005 }
2006 evsel = pos;
2007 }
2008 }
2009
2010 return evsel;
2011}
2012
2013static int __dynamic_dimension__add(struct perf_evsel *evsel,
2014 struct format_field *field,
2015 bool raw_trace, int level)
2016{
2017 struct hpp_dynamic_entry *hde;
2018
2019 hde = __alloc_dynamic_entry(evsel, field, level);
2020 if (hde == NULL)
2021 return -ENOMEM;
2022
2023 hde->raw_trace = raw_trace;
2024
2025 perf_hpp__register_sort_field(&hde->hpp);
2026 return 0;
2027}
2028
2029static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2030{
2031 int ret;
2032 struct format_field *field;
2033
2034 field = evsel->tp_format->format.fields;
2035 while (field) {
2036 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2037 if (ret < 0)
2038 return ret;
2039
2040 field = field->next;
2041 }
2042 return 0;
2043}
2044
2045static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2046 int level)
2047{
2048 int ret;
2049 struct perf_evsel *evsel;
2050
2051 evlist__for_each(evlist, evsel) {
2052 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2053 continue;
2054
2055 ret = add_evsel_fields(evsel, raw_trace, level);
2056 if (ret < 0)
2057 return ret;
2058 }
2059 return 0;
2060}
2061
2062static int add_all_matching_fields(struct perf_evlist *evlist,
2063 char *field_name, bool raw_trace, int level)
2064{
2065 int ret = -ESRCH;
2066 struct perf_evsel *evsel;
2067 struct format_field *field;
2068
2069 evlist__for_each(evlist, evsel) {
2070 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2071 continue;
2072
2073 field = pevent_find_any_field(evsel->tp_format, field_name);
2074 if (field == NULL)
2075 continue;
2076
2077 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2078 if (ret < 0)
2079 break;
2080 }
2081 return ret;
2082}
2083
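/*
 * Add sort keys for tracepoint fields. The token can be 'trace_fields'
 * (all fields of all tracepoint events), '<field>' (a field matched in
 * any tracepoint event), '<event>.<field>' or '<event>.*', optionally
 * suffixed with '/raw' to disable pretty printing.
 */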
2084static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2085 int level)
2086{
2087 char *str, *event_name, *field_name, *opt_name;
2088 struct perf_evsel *evsel;
2089 struct format_field *field;
2090 bool raw_trace = symbol_conf.raw_trace;
2091 int ret = 0;
2092
2093 if (evlist == NULL)
2094 return -ENOENT;
2095
2096 str = strdup(tok);
2097 if (str == NULL)
2098 return -ENOMEM;
2099
2100 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2101 ret = -EINVAL;
2102 goto out;
2103 }
2104
2105 if (opt_name) {
2106 if (strcmp(opt_name, "raw")) {
2107 pr_debug("unsupported field option %s\n", opt_name);
2108 ret = -EINVAL;
2109 goto out;
2110 }
2111 raw_trace = true;
2112 }
2113
2114 if (!strcmp(field_name, "trace_fields")) {
2115 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2116 goto out;
2117 }
2118
2119 if (event_name == NULL) {
2120 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2121 goto out;
2122 }
2123
2124 evsel = find_evsel(evlist, event_name);
2125 if (evsel == NULL) {
2126 pr_debug("Cannot find event: %s\n", event_name);
2127 ret = -ENOENT;
2128 goto out;
2129 }
2130
2131 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2132 pr_debug("%s is not a tracepoint event\n", event_name);
2133 ret = -EINVAL;
2134 goto out;
2135 }
2136
2137 if (!strcmp(field_name, "*")) {
2138 ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}
2146
2147 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2148 }
2149
2150out:
2151 free(str);
2152 return ret;
2153}
2154
2155static int __sort_dimension__add(struct sort_dimension *sd,
2156 struct perf_hpp_list *list,
2157 int level)
2158{
2159 if (sd->taken)
2160 return 0;
2161
2162 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2163 return -1;
2164
2165 if (sd->entry->se_collapse)
2166 sort__need_collapse = 1;
2167
2168 sd->taken = 1;
2169
2170 return 0;
2171}
2172
2173static int __hpp_dimension__add(struct hpp_dimension *hd,
2174 struct perf_hpp_list *list,
2175 int level)
2176{
2177 struct perf_hpp_fmt *fmt;
2178
2179 if (hd->taken)
2180 return 0;
2181
2182 fmt = __hpp_dimension__alloc_hpp(hd, level);
2183 if (!fmt)
2184 return -1;
2185
2186 hd->taken = 1;
2187 perf_hpp_list__register_sort_field(list, fmt);
2188 return 0;
2189}
2190
2191static int __sort_dimension__add_output(struct perf_hpp_list *list,
2192 struct sort_dimension *sd)
2193{
2194 if (sd->taken)
2195 return 0;
2196
2197 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2198 return -1;
2199
2200 sd->taken = 1;
2201 return 0;
2202}
2203
2204static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2205 struct hpp_dimension *hd)
2206{
2207 struct perf_hpp_fmt *fmt;
2208
2209 if (hd->taken)
2210 return 0;
2211
2212 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2213 if (!fmt)
2214 return -1;
2215
2216 hd->taken = 1;
2217 perf_hpp_list__column_register(list, fmt);
2218 return 0;
2219}
2220
2221int hpp_dimension__add_output(unsigned col)
2222{
2223 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2224 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2225}
2226
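/*
 * Look 'tok' up in the common, hpp, branch-stack and memory dimension
 * tables (in that order), falling back to a dynamic tracepoint field.
 * Returns -EINVAL when a key cannot be used in the current sort mode
 * and -ESRCH when it is unknown.
 */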
2227static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2228 struct perf_evlist *evlist,
2229 int level)
2230{
2231 unsigned int i;
2232
2233 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2234 struct sort_dimension *sd = &common_sort_dimensions[i];
2235
2236 if (strncasecmp(tok, sd->name, strlen(tok)))
2237 continue;
2238
2239 if (sd->entry == &sort_parent) {
2240 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2241 if (ret) {
2242 char err[BUFSIZ];
2243
2244 regerror(ret, &parent_regex, err, sizeof(err));
2245 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2246 return -EINVAL;
2247 }
2248 sort__has_parent = 1;
2249 } else if (sd->entry == &sort_sym) {
2250 sort__has_sym = 1;
			/*
			 * perf diff displays the performance difference
			 * between two or more perf.data files. Those files
			 * could come from different binaries, so we should
			 * not compare their ips, but the symbol names.
			 */
2257 if (sort__mode == SORT_MODE__DIFF)
2258 sd->entry->se_collapse = sort__sym_sort;
2259
2260 } else if (sd->entry == &sort_dso) {
2261 sort__has_dso = 1;
2262 } else if (sd->entry == &sort_socket) {
2263 sort__has_socket = 1;
2264 } else if (sd->entry == &sort_thread) {
2265 sort__has_thread = 1;
2266 } else if (sd->entry == &sort_comm) {
2267 sort__has_comm = 1;
2268 }
2269
2270 return __sort_dimension__add(sd, list, level);
2271 }
2272
2273 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2274 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2275
2276 if (strncasecmp(tok, hd->name, strlen(tok)))
2277 continue;
2278
2279 return __hpp_dimension__add(hd, list, level);
2280 }
2281
2282 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2283 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2284
2285 if (strncasecmp(tok, sd->name, strlen(tok)))
2286 continue;
2287
2288 if (sort__mode != SORT_MODE__BRANCH)
2289 return -EINVAL;
2290
2291 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2292 sort__has_sym = 1;
2293
2294 __sort_dimension__add(sd, list, level);
2295 return 0;
2296 }
2297
2298 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2299 struct sort_dimension *sd = &memory_sort_dimensions[i];
2300
2301 if (strncasecmp(tok, sd->name, strlen(tok)))
2302 continue;
2303
2304 if (sort__mode != SORT_MODE__MEMORY)
2305 return -EINVAL;
2306
2307 if (sd->entry == &sort_mem_daddr_sym)
2308 sort__has_sym = 1;
2309
2310 __sort_dimension__add(sd, list, level);
2311 return 0;
2312 }
2313
2314 if (!add_dynamic_entry(evlist, tok, level))
2315 return 0;
2316
2317 return -ESRCH;
2318}
2319
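/*
 * Parse the sort order string. Keys are separated by ',' or ' ', and
 * '{...}' groups keys so that they share the same hierarchy level.
 */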
2320static int setup_sort_list(struct perf_hpp_list *list, char *str,
2321 struct perf_evlist *evlist)
2322{
2323 char *tmp, *tok;
2324 int ret = 0;
2325 int level = 0;
2326 int next_level = 1;
2327 bool in_group = false;
2328
2329 do {
2330 tok = str;
2331 tmp = strpbrk(str, "{}, ");
2332 if (tmp) {
2333 if (in_group)
2334 next_level = level;
2335 else
2336 next_level = level + 1;
2337
2338 if (*tmp == '{')
2339 in_group = true;
2340 else if (*tmp == '}')
2341 in_group = false;
2342
2343 *tmp = '\0';
2344 str = tmp + 1;
2345 }
2346
2347 if (*tok) {
2348 ret = sort_dimension__add(list, tok, evlist, level);
2349 if (ret == -EINVAL) {
2350 error("Invalid --sort key: `%s'", tok);
2351 break;
2352 } else if (ret == -ESRCH) {
2353 error("Unknown --sort key: `%s'", tok);
2354 break;
2355 }
2356 }
2357
2358 level = next_level;
2359 } while (tmp);
2360
2361 return ret;
2362}
2363
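/*
 * Pick the default sort order for the current sort mode. If every event
 * in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT (and,
 * with raw trace output, sort on "trace_fields" directly).
 */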
2364static const char *get_default_sort_order(struct perf_evlist *evlist)
2365{
2366 const char *default_sort_orders[] = {
2367 default_sort_order,
2368 default_branch_sort_order,
2369 default_mem_sort_order,
2370 default_top_sort_order,
2371 default_diff_sort_order,
2372 default_tracepoint_sort_order,
2373 };
2374 bool use_trace = true;
2375 struct perf_evsel *evsel;
2376
2377 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2378
2379 if (evlist == NULL)
2380 goto out_no_evlist;
2381
2382 evlist__for_each(evlist, evsel) {
2383 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2384 use_trace = false;
2385 break;
2386 }
2387 }
2388
2389 if (use_trace) {
2390 sort__mode = SORT_MODE__TRACEPOINT;
2391 if (symbol_conf.raw_trace)
2392 return "trace_fields";
2393 }
2394out_no_evlist:
2395 return default_sort_orders[sort__mode];
2396}
2397
2398static int setup_sort_order(struct perf_evlist *evlist)
2399{
2400 char *new_sort_order;
2401
2402 /*
2403 * Append '+'-prefixed sort order to the default sort
2404 * order string.
2405 */
2406 if (!sort_order || is_strict_order(sort_order))
2407 return 0;
2408
2409 if (sort_order[1] == '\0') {
2410 error("Invalid --sort key: `+'");
2411 return -EINVAL;
2412 }
2413
	/*
	 * We allocate a new sort_order string, but we never free it
	 * because it is referenced throughout the rest of the code.
	 */
2418 if (asprintf(&new_sort_order, "%s,%s",
2419 get_default_sort_order(evlist), sort_order + 1) < 0) {
2420 error("Not enough memory to set up --sort");
2421 return -ENOMEM;
2422 }
2423
2424 sort_order = new_sort_order;
2425 return 0;
2426}
2427
/*
 * Add a 'pre,' prefix to 'str' if 'pre' is not already part of 'str'.
 */
2432static char *prefix_if_not_in(const char *pre, char *str)
2433{
2434 char *n;
2435
2436 if (!str || strstr(str, pre))
2437 return str;
2438
2439 if (asprintf(&n, "%s,%s", pre, str) < 0)
2440 return NULL;
2441
2442 free(str);
2443 return n;
2444}
2445
2446static char *setup_overhead(char *keys)
2447{
2448 if (sort__mode == SORT_MODE__DIFF)
2449 return keys;
2450
2451 keys = prefix_if_not_in("overhead", keys);
2452
2453 if (symbol_conf.cumulate_callchain)
2454 keys = prefix_if_not_in("overhead_children", keys);
2455
2456 return keys;
2457}
2458
2459static int __setup_sorting(struct perf_evlist *evlist)
2460{
2461 char *str;
2462 const char *sort_keys;
2463 int ret = 0;
2464
2465 ret = setup_sort_order(evlist);
2466 if (ret)
2467 return ret;
2468
2469 sort_keys = sort_order;
2470 if (sort_keys == NULL) {
2471 if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, honor it and don't add the default sort
			 * orders.
			 */
2476 return 0;
2477 }
2478
2479 sort_keys = get_default_sort_order(evlist);
2480 }
2481
2482 str = strdup(sort_keys);
2483 if (str == NULL) {
2484 error("Not enough memory to setup sort keys");
2485 return -ENOMEM;
2486 }
2487
2488 /*
2489 * Prepend overhead fields for backward compatibility.
2490 */
2491 if (!is_strict_order(field_order)) {
2492 str = setup_overhead(str);
2493 if (str == NULL) {
2494 error("Not enough memory to setup overhead keys");
2495 return -ENOMEM;
2496 }
2497 }
2498
2499 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2500
2501 free(str);
2502 return ret;
2503}
2504
2505void perf_hpp__set_elide(int idx, bool elide)
2506{
2507 struct perf_hpp_fmt *fmt;
2508 struct hpp_sort_entry *hse;
2509
2510 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2511 if (!perf_hpp__is_sort_entry(fmt))
2512 continue;
2513
2514 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2515 if (hse->se->se_width_idx == idx) {
2516 fmt->elide = elide;
2517 break;
2518 }
2519 }
2520}
2521
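/*
 * A column is elided when the corresponding filter list contains exactly
 * one entry, since every row would show the same value; the elided value
 * is optionally reported as a "# <name>: <value>" comment to fp.
 */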
2522static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2523{
2524 if (list && strlist__nr_entries(list) == 1) {
2525 if (fp != NULL)
2526 fprintf(fp, "# %s: %s\n", list_name,
2527 strlist__entry(list, 0)->s);
2528 return true;
2529 }
2530 return false;
2531}
2532
2533static bool get_elide(int idx, FILE *output)
2534{
2535 switch (idx) {
2536 case HISTC_SYMBOL:
2537 return __get_elide(symbol_conf.sym_list, "symbol", output);
2538 case HISTC_DSO:
2539 return __get_elide(symbol_conf.dso_list, "dso", output);
2540 case HISTC_COMM:
2541 return __get_elide(symbol_conf.comm_list, "comm", output);
2542 default:
2543 break;
2544 }
2545
2546 if (sort__mode != SORT_MODE__BRANCH)
2547 return false;
2548
2549 switch (idx) {
2550 case HISTC_SYMBOL_FROM:
2551 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2552 case HISTC_SYMBOL_TO:
2553 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2554 case HISTC_DSO_FROM:
2555 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2556 case HISTC_DSO_TO:
2557 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2558 default:
2559 break;
2560 }
2561
2562 return false;
2563}
2564
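/*
 * Compute the elide state for every sort column.  If that would hide all
 * of them, undo it below so that at least something remains visible.
 */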
2565void sort__setup_elide(FILE *output)
2566{
2567 struct perf_hpp_fmt *fmt;
2568 struct hpp_sort_entry *hse;
2569
2570 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2571 if (!perf_hpp__is_sort_entry(fmt))
2572 continue;
2573
2574 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2575 fmt->elide = get_elide(hse->se->se_width_idx, output);
2576 }
2577
2578	/*
2579	 * It makes no sense to elide all of the sort entries.
2580	 * Just revert them so they show up again.
2581	 */
2582 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2583 if (!perf_hpp__is_sort_entry(fmt))
2584 continue;
2585
2586 if (!fmt->elide)
2587 return;
2588 }
2589
2590 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2591 if (!perf_hpp__is_sort_entry(fmt))
2592 continue;
2593
2594 fmt->elide = false;
2595 }
2596}
2597
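/*
 * Look 'tok' up in the common, hpp, branch stack and memory dimension
 * tables (case-insensitive prefix match) and add the first match as an
 * output field.  Returns -ESRCH when no table knows the key.
 */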
2598static int output_field_add(struct perf_hpp_list *list, char *tok)
2599{
2600 unsigned int i;
2601
2602 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2603 struct sort_dimension *sd = &common_sort_dimensions[i];
2604
2605 if (strncasecmp(tok, sd->name, strlen(tok)))
2606 continue;
2607
2608 return __sort_dimension__add_output(list, sd);
2609 }
2610
2611 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2612 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2613
2614 if (strncasecmp(tok, hd->name, strlen(tok)))
2615 continue;
2616
2617 return __hpp_dimension__add_output(list, hd);
2618 }
2619
2620 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2621 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2622
2623 if (strncasecmp(tok, sd->name, strlen(tok)))
2624 continue;
2625
2626 return __sort_dimension__add_output(list, sd);
2627 }
2628
2629 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2630 struct sort_dimension *sd = &memory_sort_dimensions[i];
2631
2632 if (strncasecmp(tok, sd->name, strlen(tok)))
2633 continue;
2634
2635 return __sort_dimension__add_output(list, sd);
2636 }
2637
2638 return -ESRCH;
2639}
2640
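/*
 * Split the --fields string on ',' or ' ' and add each token as an
 * output field, reporting invalid or unknown keys to the user.
 */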
2641static int setup_output_list(struct perf_hpp_list *list, char *str)
2642{
2643 char *tmp, *tok;
2644 int ret = 0;
2645
2646 for (tok = strtok_r(str, ", ", &tmp);
2647 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2648 ret = output_field_add(list, tok);
2649 if (ret == -EINVAL) {
2650 error("Invalid --fields key: `%s'", tok);
2651 break;
2652 } else if (ret == -ESRCH) {
2653 error("Unknown --fields key: `%s'", tok);
2654 break;
2655 }
2656 }
2657
2658 return ret;
2659}
2660
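/*
 * Clear the 'taken' flags on all sort dimensions so they can be added
 * again, e.g. when sorting is set up from scratch.
 */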
2661static void reset_dimensions(void)
2662{
2663 unsigned int i;
2664
2665 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2666 common_sort_dimensions[i].taken = 0;
2667
2668 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2669 hpp_sort_dimensions[i].taken = 0;
2670
2671 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2672 bstack_sort_dimensions[i].taken = 0;
2673
2674 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2675 memory_sort_dimensions[i].taken = 0;
2676}
2677
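/*
 * A "strict" order string is one without a leading '+', i.e. one that
 * replaces the defaults instead of extending them.
 */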
2678bool is_strict_order(const char *order)
2679{
2680 return order && (*order != '+');
2681}
2682
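/*
 * Parse the --fields string and register each key as an output field.
 * A lone "+" is rejected; a leading '+' only marks the list as non-strict
 * and is skipped before parsing.
 */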
2683static int __setup_output_field(void)
2684{
2685 char *str, *strp;
2686 int ret = -EINVAL;
2687
2688 if (field_order == NULL)
2689 return 0;
2690
2691 strp = str = strdup(field_order);
2692 if (str == NULL) {
2693		error("Not enough memory to set up output fields");
2694 return -ENOMEM;
2695 }
2696
2697 if (!is_strict_order(field_order))
2698 strp++;
2699
2700 if (!strlen(strp)) {
2701 error("Invalid --fields key: `+'");
2702 goto out;
2703 }
2704
2705 ret = setup_output_list(&perf_hpp_list, strp);
2706
2707out:
2708 free(str);
2709 return ret;
2710}
2711
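/*
 * Main entry point for sort/output setup: resolve the sort keys, add the
 * 'parent' key when a non-default parent pattern is in use, initialize the
 * default output formats (except for 'perf diff'), parse --fields, and then
 * link sort keys and output fields in both directions.
 */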
2712int setup_sorting(struct perf_evlist *evlist)
2713{
2714 int err;
2715
2716 err = __setup_sorting(evlist);
2717 if (err < 0)
2718 return err;
2719
2720 if (parent_pattern != default_parent_pattern) {
2721 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2722 if (err < 0)
2723 return err;
2724 }
2725
2726 reset_dimensions();
2727
2728 /*
2729 * perf diff doesn't use default hpp output fields.
2730 */
2731 if (sort__mode != SORT_MODE__DIFF)
2732 perf_hpp__init();
2733
2734 err = __setup_output_field();
2735 if (err < 0)
2736 return err;
2737
2738 /* copy sort keys to output fields */
2739 perf_hpp__setup_output_field(&perf_hpp_list);
2740 /* and then copy output fields to sort keys */
2741 perf_hpp__append_sort_keys(&perf_hpp_list);
2742
2743 /* setup hists-specific output fields */
2744 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2745 return -1;
2746
2747 return 0;
2748}
2749
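/*
 * Drop all configured sort/output state so that setup_sorting() can be
 * called again from a clean slate.
 */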
2750void reset_output_field(void)
2751{
2752 sort__need_collapse = 0;
2753 sort__has_parent = 0;
2754 sort__has_sym = 0;
2755 sort__has_dso = 0;
2756
2757 field_order = NULL;
2758 sort_order = NULL;
2759
2760 reset_dimensions();
2761 perf_hpp__reset_output_field(&perf_hpp_list);
2762}