1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <inttypes.h>
4#include <regex.h>
5#include <stdlib.h>
6#include <linux/mman.h>
7#include <linux/time64.h>
8#include "debug.h"
9#include "dso.h"
10#include "sort.h"
11#include "hist.h"
12#include "cacheline.h"
13#include "comm.h"
14#include "map.h"
15#include "symbol.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "thread.h"
19#include "evsel.h"
20#include "evlist.h"
21#include "srcline.h"
22#include "strlist.h"
23#include "strbuf.h"
24#include <traceevent/event-parse.h>
25#include "mem-events.h"
26#include "annotate.h"
27#include "time-utils.h"
28#include <linux/kernel.h>
29#include <linux/string.h>
30
31regex_t parent_regex;
32const char default_parent_pattern[] = "^sys_|^do_page_fault";
33const char *parent_pattern = default_parent_pattern;
34const char *default_sort_order = "comm,dso,symbol";
35const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
36const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
37const char default_top_sort_order[] = "dso,symbol";
38const char default_diff_sort_order[] = "dso,symbol";
39const char default_tracepoint_sort_order[] = "trace";
40const char *sort_order;
41const char *field_order;
42regex_t ignore_callees_regex;
43int have_ignore_callees = 0;
44enum sort_mode sort__mode = SORT_MODE__NORMAL;
45
46/*
47 * Replaces all occurrences of the character passed to the:
48 *
49 * -t, --field-separator
50 *
51 * option, which selects a special separator character and disables padding
52 * with spaces: every occurrence of that separator in symbol names (and other
53 * output) is replaced with a '.', so it only ever appears as a field separator.
54*/
55static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
56{
57 int n;
58 va_list ap;
59
60 va_start(ap, fmt);
61 n = vsnprintf(bf, size, fmt, ap);
62 if (symbol_conf.field_sep && n > 0) {
63 char *sep = bf;
64
65 while (1) {
66 sep = strchr(sep, *symbol_conf.field_sep);
67 if (sep == NULL)
68 break;
69 *sep = '.';
70 }
71 }
72 va_end(ap);
73
74 if (n >= (int)size)
75 return size - 1;
76 return n;
77}
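/*
 * Illustrative example (option value and symbol are hypothetical): with
 * -t ',' set, symbol_conf.field_sep points at ",", so
 *
 *   repsep_snprintf(bf, sizeof(bf), "%s", "std::vector<int,allocator>");
 *
 * fills bf with "std::vector<int.allocator>" - every ',' in the payload is
 * rewritten to '.', keeping ',' unambiguous as the column separator in the
 * final report line.
 */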
78
79static int64_t cmp_null(const void *l, const void *r)
80{
81 if (!l && !r)
82 return 0;
83 else if (!l)
84 return -1;
85 else
86 return 1;
87}
88
89/* --sort pid */
90
91static int64_t
92sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
93{
94 return right->thread->tid - left->thread->tid;
95}
96
97static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
98 size_t size, unsigned int width)
99{
100 const char *comm = thread__comm_str(he->thread);
101
102 width = max(7U, width) - 8;
103 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
104 width, width, comm ?: "");
105}
106
107static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
108{
109 const struct thread *th = arg;
110
111 if (type != HIST_FILTER__THREAD)
112 return -1;
113
114 return th && he->thread != th;
115}
116
117struct sort_entry sort_thread = {
118 .se_header = " Pid:Command",
119 .se_cmp = sort__thread_cmp,
120 .se_snprintf = hist_entry__thread_snprintf,
121 .se_filter = hist_entry__thread_filter,
122 .se_width_idx = HISTC_THREAD,
123};
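/*
 * The entry above is the template every sort key in this file follows:
 * se_cmp keys the histogram, the optional se_collapse / se_sort override
 * collapsing and final output ordering, se_snprintf renders the column
 * cell, se_filter answers filter queries and se_width_idx selects the
 * column-width slot. For example, "--sort pid" reaches this entry through
 * the "pid" name in common_sort_dimensions[] further down.
 */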
124
125/* --sort comm */
126
127/*
128 * We can't use pointer comparison in functions below,
129 * because it gives different results based on pointer
130 * values, which could break some sorting assumptions.
131 */
132static int64_t
133sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
134{
135 return strcmp(comm__str(right->comm), comm__str(left->comm));
136}
137
138static int64_t
139sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
140{
141 return strcmp(comm__str(right->comm), comm__str(left->comm));
142}
143
144static int64_t
145sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
146{
147 return strcmp(comm__str(right->comm), comm__str(left->comm));
148}
149
150static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
151 size_t size, unsigned int width)
152{
153 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
154}
155
156struct sort_entry sort_comm = {
157 .se_header = "Command",
158 .se_cmp = sort__comm_cmp,
159 .se_collapse = sort__comm_collapse,
160 .se_sort = sort__comm_sort,
161 .se_snprintf = hist_entry__comm_snprintf,
162 .se_filter = hist_entry__thread_filter,
163 .se_width_idx = HISTC_COMM,
164};
165
166/* --sort dso */
167
168static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
169{
170 struct dso *dso_l = map_l ? map_l->dso : NULL;
171 struct dso *dso_r = map_r ? map_r->dso : NULL;
172 const char *dso_name_l, *dso_name_r;
173
174 if (!dso_l || !dso_r)
175 return cmp_null(dso_r, dso_l);
176
177 if (verbose > 0) {
178 dso_name_l = dso_l->long_name;
179 dso_name_r = dso_r->long_name;
180 } else {
181 dso_name_l = dso_l->short_name;
182 dso_name_r = dso_r->short_name;
183 }
184
185 return strcmp(dso_name_l, dso_name_r);
186}
187
188static int64_t
189sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
190{
191 return _sort__dso_cmp(right->ms.map, left->ms.map);
192}
193
194static int _hist_entry__dso_snprintf(struct map *map, char *bf,
195 size_t size, unsigned int width)
196{
197 if (map && map->dso) {
198 const char *dso_name = verbose > 0 ? map->dso->long_name :
199 map->dso->short_name;
200 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
201 }
202
203 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
204}
205
206static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
207 size_t size, unsigned int width)
208{
209 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
210}
211
212static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
213{
214 const struct dso *dso = arg;
215
216 if (type != HIST_FILTER__DSO)
217 return -1;
218
219 return dso && (!he->ms.map || he->ms.map->dso != dso);
220}
221
222struct sort_entry sort_dso = {
223 .se_header = "Shared Object",
224 .se_cmp = sort__dso_cmp,
225 .se_snprintf = hist_entry__dso_snprintf,
226 .se_filter = hist_entry__dso_filter,
227 .se_width_idx = HISTC_DSO,
228};
229
230/* --sort symbol */
231
232static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
233{
234 return (int64_t)(right_ip - left_ip);
235}
236
237static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
238{
239 if (!sym_l || !sym_r)
240 return cmp_null(sym_l, sym_r);
241
242 if (sym_l == sym_r)
243 return 0;
244
245 if (sym_l->inlined || sym_r->inlined) {
246 int ret = strcmp(sym_l->name, sym_r->name);
247
248 if (ret)
249 return ret;
250 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
251 return 0;
252 }
253
254 if (sym_l->start != sym_r->start)
255 return (int64_t)(sym_r->start - sym_l->start);
256
257 return (int64_t)(sym_r->end - sym_l->end);
258}
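/*
 * Worked example for the inlined-symbol check above (addresses are made
 * up): two inlined instances of "memcpy" with ranges [0x1000, 0x1040] and
 * [0x1020, 0x1060] share a name and overlap (0x1000 <= 0x1060 &&
 * 0x1040 >= 0x1020), so they compare equal and collapse into one entry;
 * non-overlapping instances fall through to the start/end comparison and
 * stay separate.
 */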
259
260static int64_t
261sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
262{
263 int64_t ret;
264
265 if (!left->ms.sym && !right->ms.sym)
266 return _sort__addr_cmp(left->ip, right->ip);
267
268 /*
269 * comparing symbol address alone is not enough since it's a
270 * relative address within a dso.
271 */
272 if (!hists__has(left->hists, dso)) {
273 ret = sort__dso_cmp(left, right);
274 if (ret != 0)
275 return ret;
276 }
277
278 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
279}
280
281static int64_t
282sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
283{
284 if (!left->ms.sym || !right->ms.sym)
285 return cmp_null(left->ms.sym, right->ms.sym);
286
287 return strcmp(right->ms.sym->name, left->ms.sym->name);
288}
289
290static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
291 u64 ip, char level, char *bf, size_t size,
292 unsigned int width)
293{
294 size_t ret = 0;
295
296 if (verbose > 0) {
297 char o = map ? dso__symtab_origin(map->dso) : '!';
298 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
299 BITS_PER_LONG / 4 + 2, ip, o);
300 }
301
302 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
303 if (sym && map) {
304 if (sym->type == STT_OBJECT) {
305 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
306 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
307 ip - map->unmap_ip(map, sym->start));
308 } else {
309 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
310 width - ret,
311 sym->name);
312 if (sym->inlined)
313 ret += repsep_snprintf(bf + ret, size - ret,
314 " (inlined)");
315 }
316 } else {
317 size_t len = BITS_PER_LONG / 4;
318 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
319 len, ip);
320 }
321
322 return ret;
323}
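/*
 * Illustrative output of the helper above (symbol and address are
 * hypothetical): a kernel-space sample resolved to do_page_fault prints
 * "[k] do_page_fault" in the default mode; with -v the cell is prefixed
 * by the sample address and the symtab-origin character, e.g.
 * "0xffffffff81061a20 k [k] do_page_fault".
 */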
324
325static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
326 size_t size, unsigned int width)
327{
328 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
329 he->level, bf, size, width);
330}
331
332static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
333{
334 const char *sym = arg;
335
336 if (type != HIST_FILTER__SYMBOL)
337 return -1;
338
339 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
340}
341
342struct sort_entry sort_sym = {
343 .se_header = "Symbol",
344 .se_cmp = sort__sym_cmp,
345 .se_sort = sort__sym_sort,
346 .se_snprintf = hist_entry__sym_snprintf,
347 .se_filter = hist_entry__sym_filter,
348 .se_width_idx = HISTC_SYMBOL,
349};
350
351/* --sort srcline */
352
353char *hist_entry__srcline(struct hist_entry *he)
354{
355 return map__srcline(he->ms.map, he->ip, he->ms.sym);
356}
357
358static int64_t
359sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
360{
361 if (!left->srcline)
362 left->srcline = hist_entry__srcline(left);
363 if (!right->srcline)
364 right->srcline = hist_entry__srcline(right);
365
366 return strcmp(right->srcline, left->srcline);
367}
368
369static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
370 size_t size, unsigned int width)
371{
372 if (!he->srcline)
373 he->srcline = hist_entry__srcline(he);
374
375 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
376}
377
378struct sort_entry sort_srcline = {
379 .se_header = "Source:Line",
380 .se_cmp = sort__srcline_cmp,
381 .se_snprintf = hist_entry__srcline_snprintf,
382 .se_width_idx = HISTC_SRCLINE,
383};
384
385/* --sort srcline_from */
386
387static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
388{
389 return map__srcline(ams->map, ams->al_addr, ams->sym);
390}
391
392static int64_t
393sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
394{
395 if (!left->branch_info->srcline_from)
396 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
397
398 if (!right->branch_info->srcline_from)
399 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
400
401 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
402}
403
404static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
405 size_t size, unsigned int width)
406{
407 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
408}
409
410struct sort_entry sort_srcline_from = {
411 .se_header = "From Source:Line",
412 .se_cmp = sort__srcline_from_cmp,
413 .se_snprintf = hist_entry__srcline_from_snprintf,
414 .se_width_idx = HISTC_SRCLINE_FROM,
415};
416
417/* --sort srcline_to */
418
419static int64_t
420sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
421{
422 if (!left->branch_info->srcline_to)
423 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
424
425 if (!right->branch_info->srcline_to)
426 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
427
428 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
429}
430
431static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
432 size_t size, unsigned int width)
433{
434 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
435}
436
437struct sort_entry sort_srcline_to = {
438 .se_header = "To Source:Line",
439 .se_cmp = sort__srcline_to_cmp,
440 .se_snprintf = hist_entry__srcline_to_snprintf,
441 .se_width_idx = HISTC_SRCLINE_TO,
442};
443
444static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
445 size_t size, unsigned int width)
446{
447
448 struct symbol *sym = he->ms.sym;
449 struct annotation *notes;
450 double ipc = 0.0, coverage = 0.0;
451 char tmp[64];
452
453 if (!sym)
454 return repsep_snprintf(bf, size, "%-*s", width, "-");
455
456 notes = symbol__annotation(sym);
457
458 if (notes->hit_cycles)
459 ipc = notes->hit_insn / ((double)notes->hit_cycles);
460
461 if (notes->total_insn) {
462 coverage = notes->cover_insn * 100.0 /
463 ((double)notes->total_insn);
464 }
465
466 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
467 return repsep_snprintf(bf, size, "%-*s", width, tmp);
468}
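/*
 * Worked example for the two ratios above (numbers are hypothetical): with
 * hit_insn = 1200, hit_cycles = 400 and cover_insn = 45 out of
 * total_insn = 60, the column reads "3.00  [ 75.0%]" - 1200/400
 * instructions per cycle, and 45 * 100 / 60 percent of the symbol's
 * instructions covered by sampled basic blocks.
 */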
469
470struct sort_entry sort_sym_ipc = {
471 .se_header = "IPC [IPC Coverage]",
472 .se_cmp = sort__sym_cmp,
473 .se_snprintf = hist_entry__sym_ipc_snprintf,
474 .se_width_idx = HISTC_SYMBOL_IPC,
475};
476
477static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
478 __maybe_unused,
479 char *bf, size_t size,
480 unsigned int width)
481{
482 char tmp[64];
483
484 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
485 return repsep_snprintf(bf, size, "%-*s", width, tmp);
486}
487
488struct sort_entry sort_sym_ipc_null = {
489 .se_header = "IPC [IPC Coverage]",
490 .se_cmp = sort__sym_cmp,
491 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
492 .se_width_idx = HISTC_SYMBOL_IPC,
493};
494
495/* --sort srcfile */
496
497static char no_srcfile[1];
498
499static char *hist_entry__get_srcfile(struct hist_entry *e)
500{
501 char *sf, *p;
502 struct map *map = e->ms.map;
503
504 if (!map)
505 return no_srcfile;
506
507 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
508 e->ms.sym, false, true, true, e->ip);
509 if (!strcmp(sf, SRCLINE_UNKNOWN))
510 return no_srcfile;
511 p = strchr(sf, ':');
512 if (p && *sf) {
513 *p = 0;
514 return sf;
515 }
516 free(sf);
517 return no_srcfile;
518}
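/*
 * Example of the trimming above (path is hypothetical): when
 * __get_srcline() hands back "mm/memory.c:4057", the ':' is overwritten
 * with a NUL and the entry is keyed on "mm/memory.c" only; an
 * SRCLINE_UNKNOWN result falls back to no_srcfile.
 */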
519
520static int64_t
521sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
522{
523 if (!left->srcfile)
524 left->srcfile = hist_entry__get_srcfile(left);
525 if (!right->srcfile)
526 right->srcfile = hist_entry__get_srcfile(right);
527
528 return strcmp(right->srcfile, left->srcfile);
529}
530
531static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
532 size_t size, unsigned int width)
533{
534 if (!he->srcfile)
535 he->srcfile = hist_entry__get_srcfile(he);
536
537 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
538}
539
540struct sort_entry sort_srcfile = {
541 .se_header = "Source File",
542 .se_cmp = sort__srcfile_cmp,
543 .se_snprintf = hist_entry__srcfile_snprintf,
544 .se_width_idx = HISTC_SRCFILE,
545};
546
547/* --sort parent */
548
549static int64_t
550sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
551{
552 struct symbol *sym_l = left->parent;
553 struct symbol *sym_r = right->parent;
554
555 if (!sym_l || !sym_r)
556 return cmp_null(sym_l, sym_r);
557
558 return strcmp(sym_r->name, sym_l->name);
559}
560
561static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
562 size_t size, unsigned int width)
563{
564 return repsep_snprintf(bf, size, "%-*.*s", width, width,
565 he->parent ? he->parent->name : "[other]");
566}
567
568struct sort_entry sort_parent = {
569 .se_header = "Parent symbol",
570 .se_cmp = sort__parent_cmp,
571 .se_snprintf = hist_entry__parent_snprintf,
572 .se_width_idx = HISTC_PARENT,
573};
574
575/* --sort cpu */
576
577static int64_t
578sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
579{
580 return right->cpu - left->cpu;
581}
582
583static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
584 size_t size, unsigned int width)
585{
586 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
587}
588
589struct sort_entry sort_cpu = {
590 .se_header = "CPU",
591 .se_cmp = sort__cpu_cmp,
592 .se_snprintf = hist_entry__cpu_snprintf,
593 .se_width_idx = HISTC_CPU,
594};
595
596/* --sort cgroup_id */
597
598static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
599{
600 return (int64_t)(right_dev - left_dev);
601}
602
603static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
604{
605 return (int64_t)(right_ino - left_ino);
606}
607
608static int64_t
609sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
610{
611 int64_t ret;
612
613 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
614 if (ret != 0)
615 return ret;
616
617 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
618 left->cgroup_id.ino);
619}
620
621static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
622 char *bf, size_t size,
623 unsigned int width __maybe_unused)
624{
625 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
626 he->cgroup_id.ino);
627}
628
629struct sort_entry sort_cgroup_id = {
630 .se_header = "cgroup id (dev/inode)",
631 .se_cmp = sort__cgroup_id_cmp,
632 .se_snprintf = hist_entry__cgroup_id_snprintf,
633 .se_width_idx = HISTC_CGROUP_ID,
634};
635
636/* --sort socket */
637
638static int64_t
639sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
640{
641 return right->socket - left->socket;
642}
643
644static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
645 size_t size, unsigned int width)
646{
647 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
648}
649
650static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
651{
652 int sk = *(const int *)arg;
653
654 if (type != HIST_FILTER__SOCKET)
655 return -1;
656
657 return sk >= 0 && he->socket != sk;
658}
659
660struct sort_entry sort_socket = {
661 .se_header = "Socket",
662 .se_cmp = sort__socket_cmp,
663 .se_snprintf = hist_entry__socket_snprintf,
664 .se_filter = hist_entry__socket_filter,
665 .se_width_idx = HISTC_SOCKET,
666};
667
668/* --sort time */
669
670static int64_t
671sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
672{
673 return right->time - left->time;
674}
675
676static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
677 size_t size, unsigned int width)
678{
679 char he_time[32];
680
681 if (symbol_conf.nanosecs)
682 timestamp__scnprintf_nsec(he->time, he_time,
683 sizeof(he_time));
684 else
685 timestamp__scnprintf_usec(he->time, he_time,
686 sizeof(he_time));
687
688 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
689}
690
691struct sort_entry sort_time = {
692 .se_header = "Time",
693 .se_cmp = sort__time_cmp,
694 .se_snprintf = hist_entry__time_snprintf,
695 .se_width_idx = HISTC_TIME,
696};
697
698/* --sort trace */
699
700static char *get_trace_output(struct hist_entry *he)
701{
702 struct trace_seq seq;
703 struct evsel *evsel;
704 struct tep_record rec = {
705 .data = he->raw_data,
706 .size = he->raw_size,
707 };
708
709 evsel = hists_to_evsel(he->hists);
710
711 trace_seq_init(&seq);
712 if (symbol_conf.raw_trace) {
713 tep_print_fields(&seq, he->raw_data, he->raw_size,
714 evsel->tp_format);
715 } else {
716 tep_print_event(evsel->tp_format->tep,
717 &seq, &rec, "%s", TEP_PRINT_INFO);
718 }
719 /*
720 * Trim the buffer, it starts at 4KB and we're not going to
721 * add anything more to this buffer.
722 */
723 return realloc(seq.buffer, seq.len + 1);
724}
725
726static int64_t
727sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
728{
729 struct evsel *evsel;
730
731 evsel = hists_to_evsel(left->hists);
732 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
733 return 0;
734
735 if (left->trace_output == NULL)
736 left->trace_output = get_trace_output(left);
737 if (right->trace_output == NULL)
738 right->trace_output = get_trace_output(right);
739
740 return strcmp(right->trace_output, left->trace_output);
741}
742
743static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
744 size_t size, unsigned int width)
745{
746 struct evsel *evsel;
747
748 evsel = hists_to_evsel(he->hists);
749 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
750 return scnprintf(bf, size, "%-.*s", width, "N/A");
751
752 if (he->trace_output == NULL)
753 he->trace_output = get_trace_output(he);
754 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
755}
756
757struct sort_entry sort_trace = {
758 .se_header = "Trace output",
759 .se_cmp = sort__trace_cmp,
760 .se_snprintf = hist_entry__trace_snprintf,
761 .se_width_idx = HISTC_TRACE,
762};
763
764/* sort keys for branch stacks */
765
766static int64_t
767sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
768{
769 if (!left->branch_info || !right->branch_info)
770 return cmp_null(left->branch_info, right->branch_info);
771
772 return _sort__dso_cmp(left->branch_info->from.map,
773 right->branch_info->from.map);
774}
775
776static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
777 size_t size, unsigned int width)
778{
779 if (he->branch_info)
780 return _hist_entry__dso_snprintf(he->branch_info->from.map,
781 bf, size, width);
782 else
783 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
784}
785
786static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
787 const void *arg)
788{
789 const struct dso *dso = arg;
790
791 if (type != HIST_FILTER__DSO)
792 return -1;
793
794 return dso && (!he->branch_info || !he->branch_info->from.map ||
795 he->branch_info->from.map->dso != dso);
796}
797
798static int64_t
799sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
800{
801 if (!left->branch_info || !right->branch_info)
802 return cmp_null(left->branch_info, right->branch_info);
803
804 return _sort__dso_cmp(left->branch_info->to.map,
805 right->branch_info->to.map);
806}
807
808static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
809 size_t size, unsigned int width)
810{
811 if (he->branch_info)
812 return _hist_entry__dso_snprintf(he->branch_info->to.map,
813 bf, size, width);
814 else
815 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
816}
817
818static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
819 const void *arg)
820{
821 const struct dso *dso = arg;
822
823 if (type != HIST_FILTER__DSO)
824 return -1;
825
826 return dso && (!he->branch_info || !he->branch_info->to.map ||
827 he->branch_info->to.map->dso != dso);
828}
829
830static int64_t
831sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
832{
833 struct addr_map_symbol *from_l;
834 struct addr_map_symbol *from_r;
835
836 if (!left->branch_info || !right->branch_info)
837 return cmp_null(left->branch_info, right->branch_info);
838
839 from_l = &left->branch_info->from;
840 from_r = &right->branch_info->from;
841
842 if (!from_l->sym && !from_r->sym)
843 return _sort__addr_cmp(from_l->addr, from_r->addr);
844
845 return _sort__sym_cmp(from_l->sym, from_r->sym);
846}
847
848static int64_t
849sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
850{
851 struct addr_map_symbol *to_l, *to_r;
852
853 if (!left->branch_info || !right->branch_info)
854 return cmp_null(left->branch_info, right->branch_info);
855
856 to_l = &left->branch_info->to;
857 to_r = &right->branch_info->to;
858
859 if (!to_l->sym && !to_r->sym)
860 return _sort__addr_cmp(to_l->addr, to_r->addr);
861
862 return _sort__sym_cmp(to_l->sym, to_r->sym);
863}
864
865static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
866 size_t size, unsigned int width)
867{
868 if (he->branch_info) {
869 struct addr_map_symbol *from = &he->branch_info->from;
870
871 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
872 he->level, bf, size, width);
873 }
874
875 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
876}
877
878static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
879 size_t size, unsigned int width)
880{
881 if (he->branch_info) {
882 struct addr_map_symbol *to = &he->branch_info->to;
883
884 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
885 he->level, bf, size, width);
886 }
887
888 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
889}
890
891static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
892 const void *arg)
893{
894 const char *sym = arg;
895
896 if (type != HIST_FILTER__SYMBOL)
897 return -1;
898
899 return sym && !(he->branch_info && he->branch_info->from.sym &&
900 strstr(he->branch_info->from.sym->name, sym));
901}
902
903static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
904 const void *arg)
905{
906 const char *sym = arg;
907
908 if (type != HIST_FILTER__SYMBOL)
909 return -1;
910
911 return sym && !(he->branch_info && he->branch_info->to.sym &&
912 strstr(he->branch_info->to.sym->name, sym));
913}
914
915struct sort_entry sort_dso_from = {
916 .se_header = "Source Shared Object",
917 .se_cmp = sort__dso_from_cmp,
918 .se_snprintf = hist_entry__dso_from_snprintf,
919 .se_filter = hist_entry__dso_from_filter,
920 .se_width_idx = HISTC_DSO_FROM,
921};
922
923struct sort_entry sort_dso_to = {
924 .se_header = "Target Shared Object",
925 .se_cmp = sort__dso_to_cmp,
926 .se_snprintf = hist_entry__dso_to_snprintf,
927 .se_filter = hist_entry__dso_to_filter,
928 .se_width_idx = HISTC_DSO_TO,
929};
930
931struct sort_entry sort_sym_from = {
932 .se_header = "Source Symbol",
933 .se_cmp = sort__sym_from_cmp,
934 .se_snprintf = hist_entry__sym_from_snprintf,
935 .se_filter = hist_entry__sym_from_filter,
936 .se_width_idx = HISTC_SYMBOL_FROM,
937};
938
939struct sort_entry sort_sym_to = {
940 .se_header = "Target Symbol",
941 .se_cmp = sort__sym_to_cmp,
942 .se_snprintf = hist_entry__sym_to_snprintf,
943 .se_filter = hist_entry__sym_to_filter,
944 .se_width_idx = HISTC_SYMBOL_TO,
945};
946
947static int64_t
948sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
949{
950 unsigned char mp, p;
951
952 if (!left->branch_info || !right->branch_info)
953 return cmp_null(left->branch_info, right->branch_info);
954
955 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
956 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
957 return mp || p;
958}
959
960static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
961 size_t size, unsigned int width)
{
962 static const char *out = "N/A";
963
964 if (he->branch_info) {
965 if (he->branch_info->flags.predicted)
966 out = "N";
967 else if (he->branch_info->flags.mispred)
968 out = "Y";
969 }
970
971 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
972}
973
974static int64_t
975sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
976{
977 if (!left->branch_info || !right->branch_info)
978 return cmp_null(left->branch_info, right->branch_info);
979
980 return left->branch_info->flags.cycles -
981 right->branch_info->flags.cycles;
982}
983
984static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
985 size_t size, unsigned int width)
986{
987 if (!he->branch_info)
988 return scnprintf(bf, size, "%-.*s", width, "N/A");
989 if (he->branch_info->flags.cycles == 0)
990 return repsep_snprintf(bf, size, "%-*s", width, "-");
991 return repsep_snprintf(bf, size, "%-*hd", width,
992 he->branch_info->flags.cycles);
993}
994
995struct sort_entry sort_cycles = {
996 .se_header = "Basic Block Cycles",
997 .se_cmp = sort__cycles_cmp,
998 .se_snprintf = hist_entry__cycles_snprintf,
999 .se_width_idx = HISTC_CYCLES,
1000};
1001
1002/* --sort daddr_sym */
1003int64_t
1004sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1005{
1006 uint64_t l = 0, r = 0;
1007
1008 if (left->mem_info)
1009 l = left->mem_info->daddr.addr;
1010 if (right->mem_info)
1011 r = right->mem_info->daddr.addr;
1012
1013 return (int64_t)(r - l);
1014}
1015
1016static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1017 size_t size, unsigned int width)
1018{
1019 uint64_t addr = 0;
1020 struct map *map = NULL;
1021 struct symbol *sym = NULL;
1022
1023 if (he->mem_info) {
1024 addr = he->mem_info->daddr.addr;
1025 map = he->mem_info->daddr.map;
1026 sym = he->mem_info->daddr.sym;
1027 }
1028 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
1029 width);
1030}
1031
1032int64_t
1033sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1034{
1035 uint64_t l = 0, r = 0;
1036
1037 if (left->mem_info)
1038 l = left->mem_info->iaddr.addr;
1039 if (right->mem_info)
1040 r = right->mem_info->iaddr.addr;
1041
1042 return (int64_t)(r - l);
1043}
1044
1045static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1046 size_t size, unsigned int width)
1047{
1048 uint64_t addr = 0;
1049 struct map *map = NULL;
1050 struct symbol *sym = NULL;
1051
1052 if (he->mem_info) {
1053 addr = he->mem_info->iaddr.addr;
1054 map = he->mem_info->iaddr.map;
1055 sym = he->mem_info->iaddr.sym;
1056 }
1057 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
1058 width);
1059}
1060
1061static int64_t
1062sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1063{
1064 struct map *map_l = NULL;
1065 struct map *map_r = NULL;
1066
1067 if (left->mem_info)
1068 map_l = left->mem_info->daddr.map;
1069 if (right->mem_info)
1070 map_r = right->mem_info->daddr.map;
1071
1072 return _sort__dso_cmp(map_l, map_r);
1073}
1074
1075static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1076 size_t size, unsigned int width)
1077{
1078 struct map *map = NULL;
1079
1080 if (he->mem_info)
1081 map = he->mem_info->daddr.map;
1082
1083 return _hist_entry__dso_snprintf(map, bf, size, width);
1084}
1085
1086static int64_t
1087sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1088{
1089 union perf_mem_data_src data_src_l;
1090 union perf_mem_data_src data_src_r;
1091
1092 if (left->mem_info)
1093 data_src_l = left->mem_info->data_src;
1094 else
1095 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1096
1097 if (right->mem_info)
1098 data_src_r = right->mem_info->data_src;
1099 else
1100 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1101
1102 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1103}
1104
1105static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1106 size_t size, unsigned int width)
1107{
1108 char out[10];
1109
1110 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1111 return repsep_snprintf(bf, size, "%.*s", width, out);
1112}
1113
1114static int64_t
1115sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1116{
1117 union perf_mem_data_src data_src_l;
1118 union perf_mem_data_src data_src_r;
1119
1120 if (left->mem_info)
1121 data_src_l = left->mem_info->data_src;
1122 else
1123 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1124
1125 if (right->mem_info)
1126 data_src_r = right->mem_info->data_src;
1127 else
1128 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1129
1130 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1131}
1132
1133static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1134 size_t size, unsigned int width)
1135{
1136 char out[64];
1137
1138 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1139 return repsep_snprintf(bf, size, "%-*s", width, out);
1140}
1141
1142static int64_t
1143sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1144{
1145 union perf_mem_data_src data_src_l;
1146 union perf_mem_data_src data_src_r;
1147
1148 if (left->mem_info)
1149 data_src_l = left->mem_info->data_src;
1150 else
1151 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1152
1153 if (right->mem_info)
1154 data_src_r = right->mem_info->data_src;
1155 else
1156 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1157
1158 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1159}
1160
1161static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1162 size_t size, unsigned int width)
1163{
1164 char out[64];
1165
1166 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1167 return repsep_snprintf(bf, size, "%-*s", width, out);
1168}
1169
1170static int64_t
1171sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1172{
1173 union perf_mem_data_src data_src_l;
1174 union perf_mem_data_src data_src_r;
1175
1176 if (left->mem_info)
1177 data_src_l = left->mem_info->data_src;
1178 else
1179 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1180
1181 if (right->mem_info)
1182 data_src_r = right->mem_info->data_src;
1183 else
1184 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1185
1186 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1187}
1188
1189static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1190 size_t size, unsigned int width)
1191{
1192 char out[64];
1193
1194 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1195 return repsep_snprintf(bf, size, "%-*s", width, out);
1196}
1197
1198int64_t
1199sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1200{
1201 u64 l, r;
1202 struct map *l_map, *r_map;
1203
1204 if (!left->mem_info) return -1;
1205 if (!right->mem_info) return 1;
1206
1207 /* group event types together */
1208 if (left->cpumode > right->cpumode) return -1;
1209 if (left->cpumode < right->cpumode) return 1;
1210
1211 l_map = left->mem_info->daddr.map;
1212 r_map = right->mem_info->daddr.map;
1213
1214 /* if both are NULL, jump to sort on al_addr instead */
1215 if (!l_map && !r_map)
1216 goto addr;
1217
1218 if (!l_map) return -1;
1219 if (!r_map) return 1;
1220
1221 if (l_map->maj > r_map->maj) return -1;
1222 if (l_map->maj < r_map->maj) return 1;
1223
1224 if (l_map->min > r_map->min) return -1;
1225 if (l_map->min < r_map->min) return 1;
1226
1227 if (l_map->ino > r_map->ino) return -1;
1228 if (l_map->ino < r_map->ino) return 1;
1229
1230 if (l_map->ino_generation > r_map->ino_generation) return -1;
1231 if (l_map->ino_generation < r_map->ino_generation) return 1;
1232
1233 /*
1234 * Addresses with no major/minor numbers are assumed to be
1235 * anonymous in userspace. Sort those on pid then address.
1236 *
1237 * The kernel and non-zero major/minor mapped areas are
1238 * assumed to be unity mapped. Sort those on address.
1239 */
1240
1241 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1242 (!(l_map->flags & MAP_SHARED)) &&
1243 !l_map->maj && !l_map->min && !l_map->ino &&
1244 !l_map->ino_generation) {
1245 /* userspace anonymous */
1246
1247 if (left->thread->pid_ > right->thread->pid_) return -1;
1248 if (left->thread->pid_ < right->thread->pid_) return 1;
1249 }
1250
1251addr:
1252 /* al_addr does all the right addr - start + offset calculations */
1253 l = cl_address(left->mem_info->daddr.al_addr);
1254 r = cl_address(right->mem_info->daddr.al_addr);
1255
1256 if (l > r) return -1;
1257 if (l < r) return 1;
1258
1259 return 0;
1260}
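/*
 * Worked example for the final address comparison (addresses are
 * hypothetical, 64-byte cache lines assumed): al_addr values 0x7f3a10 and
 * 0x7f3a2c both mask down to cl_address() == 0x7f3a00, so the two accesses
 * land in the same "Data Cacheline" bucket even though their byte offsets
 * differ.
 */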
1261
1262static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1263 size_t size, unsigned int width)
1264{
1265
1266 uint64_t addr = 0;
1267 struct map *map = NULL;
1268 struct symbol *sym = NULL;
1269 char level = he->level;
1270
1271 if (he->mem_info) {
1272 addr = cl_address(he->mem_info->daddr.al_addr);
1273 map = he->mem_info->daddr.map;
1274 sym = he->mem_info->daddr.sym;
1275
1276 /* print [s] for shared data mmaps */
1277 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1278 map && !(map->prot & PROT_EXEC) &&
1279 (map->flags & MAP_SHARED) &&
1280 (map->maj || map->min || map->ino ||
1281 map->ino_generation))
1282 level = 's';
1283 else if (!map)
1284 level = 'X';
1285 }
1286 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1287 width);
1288}
1289
1290struct sort_entry sort_mispredict = {
1291 .se_header = "Branch Mispredicted",
1292 .se_cmp = sort__mispredict_cmp,
1293 .se_snprintf = hist_entry__mispredict_snprintf,
1294 .se_width_idx = HISTC_MISPREDICT,
1295};
1296
1297static u64 he_weight(struct hist_entry *he)
1298{
1299 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1300}
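/*
 * Example of the local/global distinction (numbers are made up): an entry
 * that accumulated stat.weight = 900 over stat.nr_events = 3 samples
 * reports a Local Weight of 900 / 3 = 300 here, while the global "weight"
 * key below sorts and prints the raw 900 total.
 */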
1301
1302static int64_t
1303sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1304{
1305 return he_weight(left) - he_weight(right);
1306}
1307
1308static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1309 size_t size, unsigned int width)
1310{
1311 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1312}
1313
1314struct sort_entry sort_local_weight = {
1315 .se_header = "Local Weight",
1316 .se_cmp = sort__local_weight_cmp,
1317 .se_snprintf = hist_entry__local_weight_snprintf,
1318 .se_width_idx = HISTC_LOCAL_WEIGHT,
1319};
1320
1321static int64_t
1322sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1323{
1324 return left->stat.weight - right->stat.weight;
1325}
1326
1327static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1328 size_t size, unsigned int width)
1329{
1330 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1331}
1332
1333struct sort_entry sort_global_weight = {
1334 .se_header = "Weight",
1335 .se_cmp = sort__global_weight_cmp,
1336 .se_snprintf = hist_entry__global_weight_snprintf,
1337 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1338};
1339
1340struct sort_entry sort_mem_daddr_sym = {
1341 .se_header = "Data Symbol",
1342 .se_cmp = sort__daddr_cmp,
1343 .se_snprintf = hist_entry__daddr_snprintf,
1344 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1345};
1346
1347struct sort_entry sort_mem_iaddr_sym = {
1348 .se_header = "Code Symbol",
1349 .se_cmp = sort__iaddr_cmp,
1350 .se_snprintf = hist_entry__iaddr_snprintf,
1351 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1352};
1353
1354struct sort_entry sort_mem_daddr_dso = {
1355 .se_header = "Data Object",
1356 .se_cmp = sort__dso_daddr_cmp,
1357 .se_snprintf = hist_entry__dso_daddr_snprintf,
1358 .se_width_idx = HISTC_MEM_DADDR_DSO,
1359};
1360
1361struct sort_entry sort_mem_locked = {
1362 .se_header = "Locked",
1363 .se_cmp = sort__locked_cmp,
1364 .se_snprintf = hist_entry__locked_snprintf,
1365 .se_width_idx = HISTC_MEM_LOCKED,
1366};
1367
1368struct sort_entry sort_mem_tlb = {
1369 .se_header = "TLB access",
1370 .se_cmp = sort__tlb_cmp,
1371 .se_snprintf = hist_entry__tlb_snprintf,
1372 .se_width_idx = HISTC_MEM_TLB,
1373};
1374
1375struct sort_entry sort_mem_lvl = {
1376 .se_header = "Memory access",
1377 .se_cmp = sort__lvl_cmp,
1378 .se_snprintf = hist_entry__lvl_snprintf,
1379 .se_width_idx = HISTC_MEM_LVL,
1380};
1381
1382struct sort_entry sort_mem_snoop = {
1383 .se_header = "Snoop",
1384 .se_cmp = sort__snoop_cmp,
1385 .se_snprintf = hist_entry__snoop_snprintf,
1386 .se_width_idx = HISTC_MEM_SNOOP,
1387};
1388
1389struct sort_entry sort_mem_dcacheline = {
1390 .se_header = "Data Cacheline",
1391 .se_cmp = sort__dcacheline_cmp,
1392 .se_snprintf = hist_entry__dcacheline_snprintf,
1393 .se_width_idx = HISTC_MEM_DCACHELINE,
1394};
1395
1396static int64_t
1397sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1398{
1399 uint64_t l = 0, r = 0;
1400
1401 if (left->mem_info)
1402 l = left->mem_info->daddr.phys_addr;
1403 if (right->mem_info)
1404 r = right->mem_info->daddr.phys_addr;
1405
1406 return (int64_t)(r - l);
1407}
1408
1409static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1410 size_t size, unsigned int width)
1411{
1412 uint64_t addr = 0;
1413 size_t ret = 0;
1414 size_t len = BITS_PER_LONG / 4;
1415
1416 addr = he->mem_info->daddr.phys_addr;
1417
1418 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1419
1420 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1421
1422 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1423
1424 if (ret > width)
1425 bf[width] = '\0';
1426
1427 return width;
1428}
1429
1430struct sort_entry sort_mem_phys_daddr = {
1431 .se_header = "Data Physical Address",
1432 .se_cmp = sort__phys_daddr_cmp,
1433 .se_snprintf = hist_entry__phys_daddr_snprintf,
1434 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1435};
1436
1437static int64_t
1438sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1439{
1440 if (!left->branch_info || !right->branch_info)
1441 return cmp_null(left->branch_info, right->branch_info);
1442
1443 return left->branch_info->flags.abort !=
1444 right->branch_info->flags.abort;
1445}
1446
1447static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1448 size_t size, unsigned int width)
1449{
1450 static const char *out = "N/A";
1451
1452 if (he->branch_info) {
1453 if (he->branch_info->flags.abort)
1454 out = "A";
1455 else
1456 out = ".";
1457 }
1458
1459 return repsep_snprintf(bf, size, "%-*s", width, out);
1460}
1461
1462struct sort_entry sort_abort = {
1463 .se_header = "Transaction abort",
1464 .se_cmp = sort__abort_cmp,
1465 .se_snprintf = hist_entry__abort_snprintf,
1466 .se_width_idx = HISTC_ABORT,
1467};
1468
1469static int64_t
1470sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1471{
1472 if (!left->branch_info || !right->branch_info)
1473 return cmp_null(left->branch_info, right->branch_info);
1474
1475 return left->branch_info->flags.in_tx !=
1476 right->branch_info->flags.in_tx;
1477}
1478
1479static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1480 size_t size, unsigned int width)
1481{
1482 static const char *out = "N/A";
1483
1484 if (he->branch_info) {
1485 if (he->branch_info->flags.in_tx)
1486 out = "T";
1487 else
1488 out = ".";
1489 }
1490
1491 return repsep_snprintf(bf, size, "%-*s", width, out);
1492}
1493
1494struct sort_entry sort_in_tx = {
1495 .se_header = "Branch in transaction",
1496 .se_cmp = sort__in_tx_cmp,
1497 .se_snprintf = hist_entry__in_tx_snprintf,
1498 .se_width_idx = HISTC_IN_TX,
1499};
1500
1501static int64_t
1502sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1503{
1504 return left->transaction - right->transaction;
1505}
1506
1507static inline char *add_str(char *p, const char *str)
1508{
1509 strcpy(p, str);
1510 return p + strlen(str);
1511}
1512
1513static struct txbit {
1514 unsigned flag;
1515 const char *name;
1516 int skip_for_len;
1517} txbits[] = {
1518 { PERF_TXN_ELISION, "EL ", 0 },
1519 { PERF_TXN_TRANSACTION, "TX ", 1 },
1520 { PERF_TXN_SYNC, "SYNC ", 1 },
1521 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1522 { PERF_TXN_RETRY, "RETRY ", 0 },
1523 { PERF_TXN_CONFLICT, "CON ", 0 },
1524 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1525 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1526 { 0, NULL, 0 }
1527};
1528
1529int hist_entry__transaction_len(void)
1530{
1531 int i;
1532 int len = 0;
1533
1534 for (i = 0; txbits[i].name; i++) {
1535 if (!txbits[i].skip_for_len)
1536 len += strlen(txbits[i].name);
1537 }
1538 len += 4; /* :XX<space> */
1539 return len;
1540}
1541
1542static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1543 size_t size, unsigned int width)
1544{
1545 u64 t = he->transaction;
1546 char buf[128];
1547 char *p = buf;
1548 int i;
1549
1550 buf[0] = 0;
1551 for (i = 0; txbits[i].name; i++)
1552 if (txbits[i].flag & t)
1553 p = add_str(p, txbits[i].name);
1554 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1555 p = add_str(p, "NEITHER ");
1556 if (t & PERF_TXN_ABORT_MASK) {
1557 sprintf(p, ":%" PRIx64,
1558 (t & PERF_TXN_ABORT_MASK) >>
1559 PERF_TXN_ABORT_SHIFT);
1560 p += strlen(p);
1561 }
1562
1563 return repsep_snprintf(bf, size, "%-*s", width, buf);
1564}
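/*
 * Illustrative rendering (flag combination is hypothetical): a transaction
 * word with PERF_TXN_TRANSACTION | PERF_TXN_SYNC | PERF_TXN_CONFLICT set
 * and an abort code of 3 in the PERF_TXN_ABORT_MASK bits prints as
 * "TX SYNC CON :3" - the matching txbits[] names in table order followed
 * by the abort code in hex.
 */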
1565
1566struct sort_entry sort_transaction = {
1567 .se_header = "Transaction ",
1568 .se_cmp = sort__transaction_cmp,
1569 .se_snprintf = hist_entry__transaction_snprintf,
1570 .se_width_idx = HISTC_TRANSACTION,
1571};
1572
1573/* --sort symbol_size */
1574
1575static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1576{
1577 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1578 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1579
1580 return size_l < size_r ? -1 :
1581 size_l == size_r ? 0 : 1;
1582}
1583
1584static int64_t
1585sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1586{
1587 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1588}
1589
1590static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1591 size_t bf_size, unsigned int width)
1592{
1593 if (sym)
1594 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1595
1596 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1597}
1598
1599static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1600 size_t size, unsigned int width)
1601{
1602 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1603}
1604
1605struct sort_entry sort_sym_size = {
1606 .se_header = "Symbol size",
1607 .se_cmp = sort__sym_size_cmp,
1608 .se_snprintf = hist_entry__sym_size_snprintf,
1609 .se_width_idx = HISTC_SYM_SIZE,
1610};
1611
1612/* --sort dso_size */
1613
1614static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1615{
1616 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1617 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1618
1619 return size_l < size_r ? -1 :
1620 size_l == size_r ? 0 : 1;
1621}
1622
1623static int64_t
1624sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1625{
1626 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1627}
1628
1629static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1630 size_t bf_size, unsigned int width)
1631{
1632 if (map && map->dso)
1633 return repsep_snprintf(bf, bf_size, "%*d", width,
1634 map__size(map));
1635
1636 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1637}
1638
1639static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1640 size_t size, unsigned int width)
1641{
1642 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1643}
1644
1645struct sort_entry sort_dso_size = {
1646 .se_header = "DSO size",
1647 .se_cmp = sort__dso_size_cmp,
1648 .se_snprintf = hist_entry__dso_size_snprintf,
1649 .se_width_idx = HISTC_DSO_SIZE,
1650};
1651
1652
1653struct sort_dimension {
1654 const char *name;
1655 struct sort_entry *entry;
1656 int taken;
1657};
1658
1659#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1660
1661static struct sort_dimension common_sort_dimensions[] = {
1662 DIM(SORT_PID, "pid", sort_thread),
1663 DIM(SORT_COMM, "comm", sort_comm),
1664 DIM(SORT_DSO, "dso", sort_dso),
1665 DIM(SORT_SYM, "symbol", sort_sym),
1666 DIM(SORT_PARENT, "parent", sort_parent),
1667 DIM(SORT_CPU, "cpu", sort_cpu),
1668 DIM(SORT_SOCKET, "socket", sort_socket),
1669 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1670 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1671 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1672 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1673 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1674 DIM(SORT_TRACE, "trace", sort_trace),
1675 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1676 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1677 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1678 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1679 DIM(SORT_TIME, "time", sort_time),
1680};
1681
1682#undef DIM
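/*
 * Reading the table above together with the defaults at the top of the
 * file: the default "comm,dso,symbol" order resolves to sort_comm,
 * sort_dso and sort_sym via the SORT_COMM, SORT_DSO and SORT_SYM slots; a
 * user-supplied --sort string is matched against the .name fields the same
 * way (the lookup code lives later in this file).
 */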
1683
1684#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1685
1686static struct sort_dimension bstack_sort_dimensions[] = {
1687 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1688 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1689 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1690 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1691 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1692 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1693 DIM(SORT_ABORT, "abort", sort_abort),
1694 DIM(SORT_CYCLES, "cycles", sort_cycles),
1695 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1696 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1697 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1698};
1699
1700#undef DIM
1701
1702#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1703
1704static struct sort_dimension memory_sort_dimensions[] = {
1705 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1706 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1707 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1708 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1709 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1710 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1711 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1712 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1713 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1714};
1715
1716#undef DIM
1717
1718struct hpp_dimension {
1719 const char *name;
1720 struct perf_hpp_fmt *fmt;
1721 int taken;
1722};
1723
1724#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1725
1726static struct hpp_dimension hpp_sort_dimensions[] = {
1727 DIM(PERF_HPP__OVERHEAD, "overhead"),
1728 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1729 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1730 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1731 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1732 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1733 DIM(PERF_HPP__SAMPLES, "sample"),
1734 DIM(PERF_HPP__PERIOD, "period"),
1735};
1736
1737#undef DIM
1738
1739struct hpp_sort_entry {
1740 struct perf_hpp_fmt hpp;
1741 struct sort_entry *se;
1742};
1743
1744void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1745{
1746 struct hpp_sort_entry *hse;
1747
1748 if (!perf_hpp__is_sort_entry(fmt))
1749 return;
1750
1751 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1752 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1753}
1754
1755static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1756 struct hists *hists, int line __maybe_unused,
1757 int *span __maybe_unused)
1758{
1759 struct hpp_sort_entry *hse;
1760 size_t len = fmt->user_len;
1761
1762 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1763
1764 if (!len)
1765 len = hists__col_len(hists, hse->se->se_width_idx);
1766
1767 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1768}
1769
1770static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1771 struct perf_hpp *hpp __maybe_unused,
1772 struct hists *hists)
1773{
1774 struct hpp_sort_entry *hse;
1775 size_t len = fmt->user_len;
1776
1777 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1778
1779 if (!len)
1780 len = hists__col_len(hists, hse->se->se_width_idx);
1781
1782 return len;
1783}
1784
1785static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1786 struct hist_entry *he)
1787{
1788 struct hpp_sort_entry *hse;
1789 size_t len = fmt->user_len;
1790
1791 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1792
1793 if (!len)
1794 len = hists__col_len(he->hists, hse->se->se_width_idx);
1795
1796 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1797}
1798
1799static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1800 struct hist_entry *a, struct hist_entry *b)
1801{
1802 struct hpp_sort_entry *hse;
1803
1804 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1805 return hse->se->se_cmp(a, b);
1806}
1807
1808static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1809 struct hist_entry *a, struct hist_entry *b)
1810{
1811 struct hpp_sort_entry *hse;
1812 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1813
1814 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1815 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1816 return collapse_fn(a, b);
1817}
1818
1819static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1820 struct hist_entry *a, struct hist_entry *b)
1821{
1822 struct hpp_sort_entry *hse;
1823 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1824
1825 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1826 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1827 return sort_fn(a, b);
1828}
1829
1830bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1831{
1832 return format->header == __sort__hpp_header;
1833}
1834
1835#define MK_SORT_ENTRY_CHK(key) \
1836bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1837{ \
1838 struct hpp_sort_entry *hse; \
1839 \
1840 if (!perf_hpp__is_sort_entry(fmt)) \
1841 return false; \
1842 \
1843 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1844 return hse->se == &sort_ ## key ; \
1845}
1846
1847MK_SORT_ENTRY_CHK(trace)
1848MK_SORT_ENTRY_CHK(srcline)
1849MK_SORT_ENTRY_CHK(srcfile)
1850MK_SORT_ENTRY_CHK(thread)
1851MK_SORT_ENTRY_CHK(comm)
1852MK_SORT_ENTRY_CHK(dso)
1853MK_SORT_ENTRY_CHK(sym)
1854
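/*
 * For reference, MK_SORT_ENTRY_CHK(dso) above expands to a predicate of
 * the form:
 *
 *   bool perf_hpp__is_dso_entry(struct perf_hpp_fmt *fmt)
 *   {
 *           struct hpp_sort_entry *hse;
 *
 *           if (!perf_hpp__is_sort_entry(fmt))
 *                   return false;
 *
 *           hse = container_of(fmt, struct hpp_sort_entry, hpp);
 *           return hse->se == &sort_dso;
 *   }
 *
 * i.e. "is this column the hpp wrapper around sort_dso?".
 */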
1855
1856static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1857{
1858 struct hpp_sort_entry *hse_a;
1859 struct hpp_sort_entry *hse_b;
1860
1861 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1862 return false;
1863
1864 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1865 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1866
1867 return hse_a->se == hse_b->se;
1868}
1869
1870static void hse_free(struct perf_hpp_fmt *fmt)
1871{
1872 struct hpp_sort_entry *hse;
1873
1874 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1875 free(hse);
1876}
1877
1878static struct hpp_sort_entry *
1879__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1880{
1881 struct hpp_sort_entry *hse;
1882
1883 hse = malloc(sizeof(*hse));
1884 if (hse == NULL) {
1885 pr_err("Memory allocation failed\n");
1886 return NULL;
1887 }
1888
1889 hse->se = sd->entry;
1890 hse->hpp.name = sd->entry->se_header;
1891 hse->hpp.header = __sort__hpp_header;
1892 hse->hpp.width = __sort__hpp_width;
1893 hse->hpp.entry = __sort__hpp_entry;
1894 hse->hpp.color = NULL;
1895
1896 hse->hpp.cmp = __sort__hpp_cmp;
1897 hse->hpp.collapse = __sort__hpp_collapse;
1898 hse->hpp.sort = __sort__hpp_sort;
1899 hse->hpp.equal = __sort__hpp_equal;
1900 hse->hpp.free = hse_free;
1901
1902 INIT_LIST_HEAD(&hse->hpp.list);
1903 INIT_LIST_HEAD(&hse->hpp.sort_list);
1904 hse->hpp.elide = false;
1905 hse->hpp.len = 0;
1906 hse->hpp.user_len = 0;
1907 hse->hpp.level = level;
1908
1909 return hse;
1910}
1911
1912static void hpp_free(struct perf_hpp_fmt *fmt)
1913{
1914 free(fmt);
1915}
1916
1917static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1918 int level)
1919{
1920 struct perf_hpp_fmt *fmt;
1921
1922 fmt = memdup(hd->fmt, sizeof(*fmt));
1923 if (fmt) {
1924 INIT_LIST_HEAD(&fmt->list);
1925 INIT_LIST_HEAD(&fmt->sort_list);
1926 fmt->free = hpp_free;
1927 fmt->level = level;
1928 }
1929
1930 return fmt;
1931}
1932
1933int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1934{
1935 struct perf_hpp_fmt *fmt;
1936 struct hpp_sort_entry *hse;
1937 int ret = -1;
1938 int r;
1939
1940 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1941 if (!perf_hpp__is_sort_entry(fmt))
1942 continue;
1943
1944 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1945 if (hse->se->se_filter == NULL)
1946 continue;
1947
1948 /*
1949 * hist entry is filtered if any of sort key in the hpp list
1950 * is applied. But it should skip non-matched filter types.
1951 */
1952 r = hse->se->se_filter(he, type, arg);
1953 if (r >= 0) {
1954 if (ret < 0)
1955 ret = 0;
1956 ret |= r;
1957 }
1958 }
1959
1960 return ret;
1961}
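/*
 * Example of the accumulation above: for a thread filter
 * (HIST_FILTER__THREAD) with "comm,dso" columns, sort_dso's filter returns
 * -1 (wrong type) and is ignored, while sort_comm's
 * hist_entry__thread_filter returns 0 or 1 and is OR-ed into ret; ret ends
 * up -1 only when no column understood the filter type at all.
 */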
1962
1963static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1964 struct perf_hpp_list *list,
1965 int level)
1966{
1967 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1968
1969 if (hse == NULL)
1970 return -1;
1971
1972 perf_hpp_list__register_sort_field(list, &hse->hpp);
1973 return 0;
1974}
1975
1976static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1977 struct perf_hpp_list *list)
1978{
1979 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1980
1981 if (hse == NULL)
1982 return -1;
1983
1984 perf_hpp_list__column_register(list, &hse->hpp);
1985 return 0;
1986}
1987
1988struct hpp_dynamic_entry {
1989 struct perf_hpp_fmt hpp;
1990 struct evsel *evsel;
1991 struct tep_format_field *field;
1992 unsigned dynamic_len;
1993 bool raw_trace;
1994};
1995
1996static int hde_width(struct hpp_dynamic_entry *hde)
1997{
1998 if (!hde->hpp.len) {
1999 int len = hde->dynamic_len;
2000 int namelen = strlen(hde->field->name);
2001 int fieldlen = hde->field->size;
2002
2003 if (namelen > len)
2004 len = namelen;
2005
2006 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* room to print the value in hex: "0x" + 2 digits per byte */
2008 fieldlen = hde->field->size * 2 + 2;
2009 }
2010 if (fieldlen > len)
2011 len = fieldlen;
2012
2013 hde->hpp.len = len;
2014 }
2015 return hde->hpp.len;
2016}
2017
2018static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2019 struct hist_entry *he)
2020{
2021 char *str, *pos;
2022 struct tep_format_field *field = hde->field;
2023 size_t namelen;
2024 bool last = false;
2025
2026 if (hde->raw_trace)
2027 return;
2028
2029 /* parse pretty print result and update max length */
2030 if (!he->trace_output)
2031 he->trace_output = get_trace_output(he);
2032
2033 namelen = strlen(field->name);
2034 str = he->trace_output;
2035
2036 while (str) {
2037 pos = strchr(str, ' ');
2038 if (pos == NULL) {
2039 last = true;
2040 pos = str + strlen(str);
2041 }
2042
2043 if (!strncmp(str, field->name, namelen)) {
2044 size_t len;
2045
2046 str += namelen + 1;
2047 len = pos - str;
2048
2049 if (len > hde->dynamic_len)
2050 hde->dynamic_len = len;
2051 break;
2052 }
2053
2054 if (last)
2055 str = NULL;
2056 else
2057 str = pos + 1;
2058 }
2059}
2060
2061static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2062 struct hists *hists __maybe_unused,
2063 int line __maybe_unused,
2064 int *span __maybe_unused)
2065{
2066 struct hpp_dynamic_entry *hde;
2067 size_t len = fmt->user_len;
2068
2069 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2070
2071 if (!len)
2072 len = hde_width(hde);
2073
2074 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2075}
2076
2077static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2078 struct perf_hpp *hpp __maybe_unused,
2079 struct hists *hists __maybe_unused)
2080{
2081 struct hpp_dynamic_entry *hde;
2082 size_t len = fmt->user_len;
2083
2084 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2085
2086 if (!len)
2087 len = hde_width(hde);
2088
2089 return len;
2090}
2091
2092bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2093{
2094 struct hpp_dynamic_entry *hde;
2095
2096 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2097
2098 return hists_to_evsel(hists) == hde->evsel;
2099}
2100
2101static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2102 struct hist_entry *he)
2103{
2104 struct hpp_dynamic_entry *hde;
2105 size_t len = fmt->user_len;
2106 char *str, *pos;
2107 struct tep_format_field *field;
2108 size_t namelen;
2109 bool last = false;
2110 int ret;
2111
2112 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2113
2114 if (!len)
2115 len = hde_width(hde);
2116
2117 if (hde->raw_trace)
2118 goto raw_field;
2119
2120 if (!he->trace_output)
2121 he->trace_output = get_trace_output(he);
2122
2123 field = hde->field;
2124 namelen = strlen(field->name);
2125 str = he->trace_output;
2126
2127 while (str) {
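	/*
	 * Scan the pretty-printed trace output, which is a space separated
	 * list of "name=value"-style tokens, for this field's name and print
	 * only its value; fall back to the raw field below if it is missing.
	 */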
2128 pos = strchr(str, ' ');
2129 if (pos == NULL) {
2130 last = true;
2131 pos = str + strlen(str);
2132 }
2133
2134 if (!strncmp(str, field->name, namelen)) {
2135 str += namelen + 1;
2136 str = strndup(str, pos - str);
2137
2138 if (str == NULL)
2139 return scnprintf(hpp->buf, hpp->size,
2140 "%*.*s", len, len, "ERROR");
2141 break;
2142 }
2143
2144 if (last)
2145 str = NULL;
2146 else
2147 str = pos + 1;
2148 }
2149
2150 if (str == NULL) {
2151 struct trace_seq seq;
2152raw_field:
2153 trace_seq_init(&seq);
2154 tep_print_field(&seq, he->raw_data, hde->field);
2155 str = seq.buffer;
2156 }
2157
2158 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2159 free(str);
2160 return ret;
2161}
2162
2163static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2164 struct hist_entry *a, struct hist_entry *b)
2165{
2166 struct hpp_dynamic_entry *hde;
2167 struct tep_format_field *field;
2168 unsigned offset, size;
2169
2170 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2171
2172 if (b == NULL) {
2173 update_dynamic_len(hde, a);
2174 return 0;
2175 }
2176
2177 field = hde->field;
2178 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2179 unsigned long long dyn;
2180
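		/*
		 * A dynamic field stores a descriptor in the record instead
		 * of the data itself: the payload offset lives in the low 16
		 * bits and its size in the high 16 bits.
		 */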
2181 tep_read_number_field(field, a->raw_data, &dyn);
2182 offset = dyn & 0xffff;
2183 size = (dyn >> 16) & 0xffff;
2184
2185 /* record max width for output */
2186 if (size > hde->dynamic_len)
2187 hde->dynamic_len = size;
2188 } else {
2189 offset = field->offset;
2190 size = field->size;
2191 }
2192
2193 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2194}
2195
2196bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2197{
2198 return fmt->cmp == __sort__hde_cmp;
2199}
2200
2201static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2202{
2203 struct hpp_dynamic_entry *hde_a;
2204 struct hpp_dynamic_entry *hde_b;
2205
2206 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2207 return false;
2208
2209 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2210 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2211
2212 return hde_a->field == hde_b->field;
2213}
2214
2215static void hde_free(struct perf_hpp_fmt *fmt)
2216{
2217 struct hpp_dynamic_entry *hde;
2218
2219 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2220 free(hde);
2221}
2222
2223static struct hpp_dynamic_entry *
2224__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2225 int level)
2226{
2227 struct hpp_dynamic_entry *hde;
2228
2229 hde = malloc(sizeof(*hde));
2230 if (hde == NULL) {
2231 pr_debug("Memory allocation failed\n");
2232 return NULL;
2233 }
2234
2235 hde->evsel = evsel;
2236 hde->field = field;
2237 hde->dynamic_len = 0;
2238
2239 hde->hpp.name = field->name;
2240 hde->hpp.header = __sort__hde_header;
2241 hde->hpp.width = __sort__hde_width;
2242 hde->hpp.entry = __sort__hde_entry;
2243 hde->hpp.color = NULL;
2244
2245 hde->hpp.cmp = __sort__hde_cmp;
2246 hde->hpp.collapse = __sort__hde_cmp;
2247 hde->hpp.sort = __sort__hde_cmp;
2248 hde->hpp.equal = __sort__hde_equal;
2249 hde->hpp.free = hde_free;
2250
2251 INIT_LIST_HEAD(&hde->hpp.list);
2252 INIT_LIST_HEAD(&hde->hpp.sort_list);
2253 hde->hpp.elide = false;
2254 hde->hpp.len = 0;
2255 hde->hpp.user_len = 0;
2256 hde->hpp.level = level;
2257
2258 return hde;
2259}
2260
2261struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2262{
2263 struct perf_hpp_fmt *new_fmt = NULL;
2264
2265 if (perf_hpp__is_sort_entry(fmt)) {
2266 struct hpp_sort_entry *hse, *new_hse;
2267
2268 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2269 new_hse = memdup(hse, sizeof(*hse));
2270 if (new_hse)
2271 new_fmt = &new_hse->hpp;
2272 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2273 struct hpp_dynamic_entry *hde, *new_hde;
2274
2275 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2276 new_hde = memdup(hde, sizeof(*hde));
2277 if (new_hde)
2278 new_fmt = &new_hde->hpp;
2279 } else {
2280 new_fmt = memdup(fmt, sizeof(*fmt));
2281 }
2282
	if (new_fmt == NULL)
		return NULL;

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);
2285
2286 return new_fmt;
2287}
2288
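/*
 * Split a --sort token of the form '<event>.<field>[/<option>]' in place;
 * for instance (a hypothetical token) "sched:sched_switch.next_pid/raw"
 * yields event "sched:sched_switch", field "next_pid" and option "raw".
 * A token without a '.' is treated as a bare field name with no event.
 */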
2289static int parse_field_name(char *str, char **event, char **field, char **opt)
2290{
2291 char *event_name, *field_name, *opt_name;
2292
2293 event_name = str;
2294 field_name = strchr(str, '.');
2295
2296 if (field_name) {
2297 *field_name++ = '\0';
2298 } else {
2299 event_name = NULL;
2300 field_name = str;
2301 }
2302
2303 opt_name = strchr(field_name, '/');
2304 if (opt_name)
2305 *opt_name++ = '\0';
2306
2307 *event = event_name;
2308 *field = field_name;
2309 *opt = opt_name;
2310
2311 return 0;
2312}
2313
/* Find the matching evsel using a given event name. The event name can be:
2315 * 1. '%' + event index (e.g. '%1' for first event)
2316 * 2. full event name (e.g. sched:sched_switch)
2317 * 3. partial event name (should not contain ':')
2318 */
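/*
 * For instance (hypothetical names, not taken from this file): '%2' picks
 * the second event in the evlist, 'sched:sched_switch' matches the full
 * event name, and a bare 'switch' is a partial match that must resolve to
 * exactly one event.
 */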
2319static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2320{
2321 struct evsel *evsel = NULL;
2322 struct evsel *pos;
2323 bool full_name;
2324
2325 /* case 1 */
2326 if (event_name[0] == '%') {
2327 int nr = strtol(event_name+1, NULL, 0);
2328
2329 if (nr > evlist->core.nr_entries)
2330 return NULL;
2331
2332 evsel = evlist__first(evlist);
2333 while (--nr > 0)
2334 evsel = perf_evsel__next(evsel);
2335
2336 return evsel;
2337 }
2338
2339 full_name = !!strchr(event_name, ':');
2340 evlist__for_each_entry(evlist, pos) {
2341 /* case 2 */
2342 if (full_name && !strcmp(pos->name, event_name))
2343 return pos;
2344 /* case 3 */
2345 if (!full_name && strstr(pos->name, event_name)) {
2346 if (evsel) {
2347 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2348 event_name, evsel->name, pos->name);
2349 return NULL;
2350 }
2351 evsel = pos;
2352 }
2353 }
2354
2355 return evsel;
2356}
2357
2358static int __dynamic_dimension__add(struct evsel *evsel,
2359 struct tep_format_field *field,
2360 bool raw_trace, int level)
2361{
2362 struct hpp_dynamic_entry *hde;
2363
2364 hde = __alloc_dynamic_entry(evsel, field, level);
2365 if (hde == NULL)
2366 return -ENOMEM;
2367
2368 hde->raw_trace = raw_trace;
2369
2370 perf_hpp__register_sort_field(&hde->hpp);
2371 return 0;
2372}
2373
2374static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2375{
2376 int ret;
2377 struct tep_format_field *field;
2378
2379 field = evsel->tp_format->format.fields;
2380 while (field) {
2381 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2382 if (ret < 0)
2383 return ret;
2384
2385 field = field->next;
2386 }
2387 return 0;
2388}
2389
2390static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2391 int level)
2392{
2393 int ret;
2394 struct evsel *evsel;
2395
2396 evlist__for_each_entry(evlist, evsel) {
2397 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2398 continue;
2399
2400 ret = add_evsel_fields(evsel, raw_trace, level);
2401 if (ret < 0)
2402 return ret;
2403 }
2404 return 0;
2405}
2406
2407static int add_all_matching_fields(struct evlist *evlist,
2408 char *field_name, bool raw_trace, int level)
2409{
2410 int ret = -ESRCH;
2411 struct evsel *evsel;
2412 struct tep_format_field *field;
2413
2414 evlist__for_each_entry(evlist, evsel) {
2415 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2416 continue;
2417
2418 field = tep_find_any_field(evsel->tp_format, field_name);
2419 if (field == NULL)
2420 continue;
2421
2422 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2423 if (ret < 0)
2424 break;
2425 }
2426 return ret;
2427}
2428
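/*
 * Add dynamic (tracepoint field) sort keys for a --sort token.  Accepted
 * forms, as inferred from the parsing below:
 *   trace_fields        all fields of every tracepoint event
 *   <field>             the named field in every event that has it
 *   <event>.<field>     a single field of a single event
 *   <event>.*           all fields of a single event
 * An optional "/raw" suffix disables pretty-printing for the key.
 */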
2429static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2430 int level)
2431{
2432 char *str, *event_name, *field_name, *opt_name;
2433 struct evsel *evsel;
2434 struct tep_format_field *field;
2435 bool raw_trace = symbol_conf.raw_trace;
2436 int ret = 0;
2437
2438 if (evlist == NULL)
2439 return -ENOENT;
2440
2441 str = strdup(tok);
2442 if (str == NULL)
2443 return -ENOMEM;
2444
2445 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2446 ret = -EINVAL;
2447 goto out;
2448 }
2449
2450 if (opt_name) {
2451 if (strcmp(opt_name, "raw")) {
2452 pr_debug("unsupported field option %s\n", opt_name);
2453 ret = -EINVAL;
2454 goto out;
2455 }
2456 raw_trace = true;
2457 }
2458
2459 if (!strcmp(field_name, "trace_fields")) {
2460 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2461 goto out;
2462 }
2463
2464 if (event_name == NULL) {
2465 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2466 goto out;
2467 }
2468
2469 evsel = find_evsel(evlist, event_name);
2470 if (evsel == NULL) {
2471 pr_debug("Cannot find event: %s\n", event_name);
2472 ret = -ENOENT;
2473 goto out;
2474 }
2475
2476 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2477 pr_debug("%s is not a tracepoint event\n", event_name);
2478 ret = -EINVAL;
2479 goto out;
2480 }
2481
2482 if (!strcmp(field_name, "*")) {
2483 ret = add_evsel_fields(evsel, raw_trace, level);
2484 } else {
2485 field = tep_find_any_field(evsel->tp_format, field_name);
2486 if (field == NULL) {
2487 pr_debug("Cannot find event field for %s.%s\n",
2488 event_name, field_name);
			ret = -ENOENT;
			goto out;
2490 }
2491
2492 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2493 }
2494
2495out:
2496 free(str);
2497 return ret;
2498}
2499
2500static int __sort_dimension__add(struct sort_dimension *sd,
2501 struct perf_hpp_list *list,
2502 int level)
2503{
2504 if (sd->taken)
2505 return 0;
2506
2507 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2508 return -1;
2509
2510 if (sd->entry->se_collapse)
2511 list->need_collapse = 1;
2512
2513 sd->taken = 1;
2514
2515 return 0;
2516}
2517
2518static int __hpp_dimension__add(struct hpp_dimension *hd,
2519 struct perf_hpp_list *list,
2520 int level)
2521{
2522 struct perf_hpp_fmt *fmt;
2523
2524 if (hd->taken)
2525 return 0;
2526
2527 fmt = __hpp_dimension__alloc_hpp(hd, level);
2528 if (!fmt)
2529 return -1;
2530
2531 hd->taken = 1;
2532 perf_hpp_list__register_sort_field(list, fmt);
2533 return 0;
2534}
2535
2536static int __sort_dimension__add_output(struct perf_hpp_list *list,
2537 struct sort_dimension *sd)
2538{
2539 if (sd->taken)
2540 return 0;
2541
2542 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2543 return -1;
2544
2545 sd->taken = 1;
2546 return 0;
2547}
2548
2549static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2550 struct hpp_dimension *hd)
2551{
2552 struct perf_hpp_fmt *fmt;
2553
2554 if (hd->taken)
2555 return 0;
2556
2557 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2558 if (!fmt)
2559 return -1;
2560
2561 hd->taken = 1;
2562 perf_hpp_list__column_register(list, fmt);
2563 return 0;
2564}
2565
2566int hpp_dimension__add_output(unsigned col)
2567{
2568 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2569 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2570}
2571
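/*
 * Resolve a single --sort token: common sort keys are tried first, then
 * hpp (overhead-style) keys, then branch-stack and memory keys when the
 * corresponding sort mode is active, and finally dynamic tracepoint fields.
 */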
2572int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2573 struct evlist *evlist,
2574 int level)
2575{
2576 unsigned int i;
2577
2578 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2579 struct sort_dimension *sd = &common_sort_dimensions[i];
2580
2581 if (strncasecmp(tok, sd->name, strlen(tok)))
2582 continue;
2583
2584 if (sd->entry == &sort_parent) {
2585 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2586 if (ret) {
2587 char err[BUFSIZ];
2588
2589 regerror(ret, &parent_regex, err, sizeof(err));
2590 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2591 return -EINVAL;
2592 }
2593 list->parent = 1;
2594 } else if (sd->entry == &sort_sym) {
2595 list->sym = 1;
2596 /*
			 * perf diff displays the performance difference among
			 * two or more perf.data files. Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but their symbol names.
2601 */
2602 if (sort__mode == SORT_MODE__DIFF)
2603 sd->entry->se_collapse = sort__sym_sort;
2604
2605 } else if (sd->entry == &sort_dso) {
2606 list->dso = 1;
2607 } else if (sd->entry == &sort_socket) {
2608 list->socket = 1;
2609 } else if (sd->entry == &sort_thread) {
2610 list->thread = 1;
2611 } else if (sd->entry == &sort_comm) {
2612 list->comm = 1;
2613 }
2614
2615 return __sort_dimension__add(sd, list, level);
2616 }
2617
2618 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2619 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2620
2621 if (strncasecmp(tok, hd->name, strlen(tok)))
2622 continue;
2623
2624 return __hpp_dimension__add(hd, list, level);
2625 }
2626
2627 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2628 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2629
2630 if (strncasecmp(tok, sd->name, strlen(tok)))
2631 continue;
2632
2633 if (sort__mode != SORT_MODE__BRANCH)
2634 return -EINVAL;
2635
2636 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2637 list->sym = 1;
2638
2639 __sort_dimension__add(sd, list, level);
2640 return 0;
2641 }
2642
2643 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2644 struct sort_dimension *sd = &memory_sort_dimensions[i];
2645
2646 if (strncasecmp(tok, sd->name, strlen(tok)))
2647 continue;
2648
2649 if (sort__mode != SORT_MODE__MEMORY)
2650 return -EINVAL;
2651
2652 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2653 return -EINVAL;
2654
2655 if (sd->entry == &sort_mem_daddr_sym)
2656 list->sym = 1;
2657
2658 __sort_dimension__add(sd, list, level);
2659 return 0;
2660 }
2661
2662 if (!add_dynamic_entry(evlist, tok, level))
2663 return 0;
2664
2665 return -ESRCH;
2666}
2667
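/*
 * Parse the sort-key string into dimensions.  Braces group keys that share
 * a hierarchy level, so (to sketch a hypothetical example) "comm,{dso,sym}"
 * keeps dso and sym on the same level below comm in hierarchy mode.
 */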
2668static int setup_sort_list(struct perf_hpp_list *list, char *str,
2669 struct evlist *evlist)
2670{
2671 char *tmp, *tok;
2672 int ret = 0;
2673 int level = 0;
2674 int next_level = 1;
2675 bool in_group = false;
2676
2677 do {
2678 tok = str;
2679 tmp = strpbrk(str, "{}, ");
2680 if (tmp) {
2681 if (in_group)
2682 next_level = level;
2683 else
2684 next_level = level + 1;
2685
2686 if (*tmp == '{')
2687 in_group = true;
2688 else if (*tmp == '}')
2689 in_group = false;
2690
2691 *tmp = '\0';
2692 str = tmp + 1;
2693 }
2694
2695 if (*tok) {
2696 ret = sort_dimension__add(list, tok, evlist, level);
2697 if (ret == -EINVAL) {
2698 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2699 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2700 else
2701 pr_err("Invalid --sort key: `%s'", tok);
2702 break;
2703 } else if (ret == -ESRCH) {
2704 pr_err("Unknown --sort key: `%s'", tok);
2705 break;
2706 }
2707 }
2708
2709 level = next_level;
2710 } while (tmp);
2711
2712 return ret;
2713}
2714
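/*
 * Pick the default sort order for the current sort mode.  If every event in
 * the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT so the "trace"
 * key (or "trace_fields" for raw output) is used instead.
 */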
2715static const char *get_default_sort_order(struct evlist *evlist)
2716{
2717 const char *default_sort_orders[] = {
2718 default_sort_order,
2719 default_branch_sort_order,
2720 default_mem_sort_order,
2721 default_top_sort_order,
2722 default_diff_sort_order,
2723 default_tracepoint_sort_order,
2724 };
2725 bool use_trace = true;
2726 struct evsel *evsel;
2727
2728 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2729
2730 if (evlist == NULL || perf_evlist__empty(evlist))
2731 goto out_no_evlist;
2732
2733 evlist__for_each_entry(evlist, evsel) {
2734 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2735 use_trace = false;
2736 break;
2737 }
2738 }
2739
2740 if (use_trace) {
2741 sort__mode = SORT_MODE__TRACEPOINT;
2742 if (symbol_conf.raw_trace)
2743 return "trace_fields";
2744 }
2745out_no_evlist:
2746 return default_sort_orders[sort__mode];
2747}
2748
2749static int setup_sort_order(struct evlist *evlist)
2750{
2751 char *new_sort_order;
2752
2753 /*
2754 * Append '+'-prefixed sort order to the default sort
2755 * order string.
2756 */
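	/*
	 * For example (assuming the normal default order and that "period"
	 * is a valid key), "--sort +period" ends up as
	 * "comm,dso,symbol,period".
	 */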
2757 if (!sort_order || is_strict_order(sort_order))
2758 return 0;
2759
2760 if (sort_order[1] == '\0') {
2761 pr_err("Invalid --sort key: `+'");
2762 return -EINVAL;
2763 }
2764
2765 /*
	 * We allocate a new sort_order string, but never free it,
	 * because it is referenced throughout the rest of the code.
2768 */
2769 if (asprintf(&new_sort_order, "%s,%s",
2770 get_default_sort_order(evlist), sort_order + 1) < 0) {
2771 pr_err("Not enough memory to set up --sort");
2772 return -ENOMEM;
2773 }
2774
2775 sort_order = new_sort_order;
2776 return 0;
2777}
2778
2779/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
2782 */
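/*
 * A sketch of the behaviour: prefix_if_not_in("overhead", str) turns
 * "comm,dso" into "overhead,comm,dso", while a string already containing
 * "overhead" is returned unchanged.
 */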
2783static char *prefix_if_not_in(const char *pre, char *str)
2784{
2785 char *n;
2786
2787 if (!str || strstr(str, pre))
2788 return str;
2789
2790 if (asprintf(&n, "%s,%s", pre, str) < 0)
2791 return NULL;
2792
2793 free(str);
2794 return n;
2795}
2796
2797static char *setup_overhead(char *keys)
2798{
2799 if (sort__mode == SORT_MODE__DIFF)
2800 return keys;
2801
2802 keys = prefix_if_not_in("overhead", keys);
2803
2804 if (symbol_conf.cumulate_callchain)
2805 keys = prefix_if_not_in("overhead_children", keys);
2806
2807 return keys;
2808}
2809
2810static int __setup_sorting(struct evlist *evlist)
2811{
2812 char *str;
2813 const char *sort_keys;
2814 int ret = 0;
2815
2816 ret = setup_sort_order(evlist);
2817 if (ret)
2818 return ret;
2819
2820 sort_keys = sort_order;
2821 if (sort_keys == NULL) {
2822 if (is_strict_order(field_order)) {
2823 /*
			 * If the user specified a field order but no sort
			 * order, honor it and do not add the default sort
			 * orders.
2826 */
2827 return 0;
2828 }
2829
2830 sort_keys = get_default_sort_order(evlist);
2831 }
2832
2833 str = strdup(sort_keys);
2834 if (str == NULL) {
2835 pr_err("Not enough memory to setup sort keys");
2836 return -ENOMEM;
2837 }
2838
2839 /*
2840 * Prepend overhead fields for backward compatibility.
2841 */
2842 if (!is_strict_order(field_order)) {
2843 str = setup_overhead(str);
2844 if (str == NULL) {
2845 pr_err("Not enough memory to setup overhead keys");
2846 return -ENOMEM;
2847 }
2848 }
2849
2850 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2851
2852 free(str);
2853 return ret;
2854}
2855
2856void perf_hpp__set_elide(int idx, bool elide)
2857{
2858 struct perf_hpp_fmt *fmt;
2859 struct hpp_sort_entry *hse;
2860
2861 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2862 if (!perf_hpp__is_sort_entry(fmt))
2863 continue;
2864
2865 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2866 if (hse->se->se_width_idx == idx) {
2867 fmt->elide = elide;
2868 break;
2869 }
2870 }
2871}
2872
2873static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2874{
2875 if (list && strlist__nr_entries(list) == 1) {
2876 if (fp != NULL)
2877 fprintf(fp, "# %s: %s\n", list_name,
2878 strlist__entry(list, 0)->s);
2879 return true;
2880 }
2881 return false;
2882}
2883
2884static bool get_elide(int idx, FILE *output)
2885{
2886 switch (idx) {
2887 case HISTC_SYMBOL:
2888 return __get_elide(symbol_conf.sym_list, "symbol", output);
2889 case HISTC_DSO:
2890 return __get_elide(symbol_conf.dso_list, "dso", output);
2891 case HISTC_COMM:
2892 return __get_elide(symbol_conf.comm_list, "comm", output);
2893 default:
2894 break;
2895 }
2896
2897 if (sort__mode != SORT_MODE__BRANCH)
2898 return false;
2899
2900 switch (idx) {
2901 case HISTC_SYMBOL_FROM:
2902 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2903 case HISTC_SYMBOL_TO:
2904 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2905 case HISTC_DSO_FROM:
2906 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2907 case HISTC_DSO_TO:
2908 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2909 default:
2910 break;
2911 }
2912
2913 return false;
2914}
2915
2916void sort__setup_elide(FILE *output)
2917{
2918 struct perf_hpp_fmt *fmt;
2919 struct hpp_sort_entry *hse;
2920
2921 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2922 if (!perf_hpp__is_sort_entry(fmt))
2923 continue;
2924
2925 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2926 fmt->elide = get_elide(hse->se->se_width_idx, output);
2927 }
2928
2929 /*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
2932 */
2933 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2934 if (!perf_hpp__is_sort_entry(fmt))
2935 continue;
2936
2937 if (!fmt->elide)
2938 return;
2939 }
2940
2941 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2942 if (!perf_hpp__is_sort_entry(fmt))
2943 continue;
2944
2945 fmt->elide = false;
2946 }
2947}
2948
2949int output_field_add(struct perf_hpp_list *list, char *tok)
2950{
2951 unsigned int i;
2952
2953 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2954 struct sort_dimension *sd = &common_sort_dimensions[i];
2955
2956 if (strncasecmp(tok, sd->name, strlen(tok)))
2957 continue;
2958
2959 return __sort_dimension__add_output(list, sd);
2960 }
2961
2962 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2963 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2964
2965 if (strncasecmp(tok, hd->name, strlen(tok)))
2966 continue;
2967
2968 return __hpp_dimension__add_output(list, hd);
2969 }
2970
2971 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2972 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2973
2974 if (strncasecmp(tok, sd->name, strlen(tok)))
2975 continue;
2976
2977 return __sort_dimension__add_output(list, sd);
2978 }
2979
2980 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2981 struct sort_dimension *sd = &memory_sort_dimensions[i];
2982
2983 if (strncasecmp(tok, sd->name, strlen(tok)))
2984 continue;
2985
2986 return __sort_dimension__add_output(list, sd);
2987 }
2988
2989 return -ESRCH;
2990}
2991
2992static int setup_output_list(struct perf_hpp_list *list, char *str)
2993{
2994 char *tmp, *tok;
2995 int ret = 0;
2996
2997 for (tok = strtok_r(str, ", ", &tmp);
2998 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2999 ret = output_field_add(list, tok);
3000 if (ret == -EINVAL) {
3001 ui__error("Invalid --fields key: `%s'", tok);
3002 break;
3003 } else if (ret == -ESRCH) {
3004 ui__error("Unknown --fields key: `%s'", tok);
3005 break;
3006 }
3007 }
3008
3009 return ret;
3010}
3011
3012void reset_dimensions(void)
3013{
3014 unsigned int i;
3015
3016 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3017 common_sort_dimensions[i].taken = 0;
3018
3019 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3020 hpp_sort_dimensions[i].taken = 0;
3021
3022 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3023 bstack_sort_dimensions[i].taken = 0;
3024
3025 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3026 memory_sort_dimensions[i].taken = 0;
3027}
3028
3029bool is_strict_order(const char *order)
3030{
3031 return order && (*order != '+');
3032}
3033
3034static int __setup_output_field(void)
3035{
3036 char *str, *strp;
3037 int ret = -EINVAL;
3038
3039 if (field_order == NULL)
3040 return 0;
3041
3042 strp = str = strdup(field_order);
3043 if (str == NULL) {
3044 pr_err("Not enough memory to setup output fields");
3045 return -ENOMEM;
3046 }
3047
3048 if (!is_strict_order(field_order))
3049 strp++;
3050
3051 if (!strlen(strp)) {
3052 pr_err("Invalid --fields key: `+'");
3053 goto out;
3054 }
3055
3056 ret = setup_output_list(&perf_hpp_list, strp);
3057
3058out:
3059 free(str);
3060 return ret;
3061}
3062
3063int setup_sorting(struct evlist *evlist)
3064{
3065 int err;
3066
3067 err = __setup_sorting(evlist);
3068 if (err < 0)
3069 return err;
3070
3071 if (parent_pattern != default_parent_pattern) {
3072 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3073 if (err < 0)
3074 return err;
3075 }
3076
3077 reset_dimensions();
3078
3079 /*
3080 * perf diff doesn't use default hpp output fields.
3081 */
3082 if (sort__mode != SORT_MODE__DIFF)
3083 perf_hpp__init();
3084
3085 err = __setup_output_field();
3086 if (err < 0)
3087 return err;
3088
3089 /* copy sort keys to output fields */
3090 perf_hpp__setup_output_field(&perf_hpp_list);
3091 /* and then copy output fields to sort keys */
3092 perf_hpp__append_sort_keys(&perf_hpp_list);
3093
3094 /* setup hists-specific output fields */
3095 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3096 return -1;
3097
3098 return 0;
3099}
3100
3101void reset_output_field(void)
3102{
3103 perf_hpp_list.need_collapse = 0;
3104 perf_hpp_list.parent = 0;
3105 perf_hpp_list.sym = 0;
3106 perf_hpp_list.dso = 0;
3107
3108 field_order = NULL;
3109 sort_order = NULL;
3110
3111 reset_dimensions();
3112 perf_hpp__reset_output_field(&perf_hpp_list);
3113}
3114
3115#define INDENT (3*8 + 1)
3116
3117static void add_key(struct strbuf *sb, const char *str, int *llen)
3118{
3119 if (*llen >= 75) {
3120 strbuf_addstr(sb, "\n\t\t\t ");
3121 *llen = INDENT;
3122 }
3123 strbuf_addf(sb, " %s", str);
3124 *llen += strlen(str) + 1;
3125}
3126
3127static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3128 int *llen)
3129{
3130 int i;
3131
3132 for (i = 0; i < n; i++)
3133 add_key(sb, s[i].name, llen);
3134}
3135
3136static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3137 int *llen)
3138{
3139 int i;
3140
3141 for (i = 0; i < n; i++)
3142 add_key(sb, s[i].name, llen);
3143}
3144
3145const char *sort_help(const char *prefix)
3146{
3147 struct strbuf sb;
3148 char *s;
3149 int len = strlen(prefix) + INDENT;
3150
3151 strbuf_init(&sb, 300);
3152 strbuf_addstr(&sb, prefix);
3153 add_hpp_sort_string(&sb, hpp_sort_dimensions,
3154 ARRAY_SIZE(hpp_sort_dimensions), &len);
3155 add_sort_string(&sb, common_sort_dimensions,
3156 ARRAY_SIZE(common_sort_dimensions), &len);
3157 add_sort_string(&sb, bstack_sort_dimensions,
3158 ARRAY_SIZE(bstack_sort_dimensions), &len);
3159 add_sort_string(&sb, memory_sort_dimensions,
3160 ARRAY_SIZE(memory_sort_dimensions), &len);
3161 s = strbuf_detach(&sb, NULL);
3162 strbuf_release(&sb);
3163 return s;
3164}
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <inttypes.h>
4#include <regex.h>
5#include <sys/mman.h>
6#include "sort.h"
7#include "hist.h"
8#include "comm.h"
9#include "symbol.h"
10#include "thread.h"
11#include "evsel.h"
12#include "evlist.h"
13#include "strlist.h"
14#include <traceevent/event-parse.h>
15#include "mem-events.h"
16#include <linux/kernel.h>
17
18regex_t parent_regex;
19const char default_parent_pattern[] = "^sys_|^do_page_fault";
20const char *parent_pattern = default_parent_pattern;
21const char *default_sort_order = "comm,dso,symbol";
22const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
23const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
24const char default_top_sort_order[] = "dso,symbol";
25const char default_diff_sort_order[] = "dso,symbol";
26const char default_tracepoint_sort_order[] = "trace";
27const char *sort_order;
28const char *field_order;
29regex_t ignore_callees_regex;
30int have_ignore_callees = 0;
31enum sort_mode sort__mode = SORT_MODE__NORMAL;
32
33/*
34 * Replaces all occurrences of a char used with the:
35 *
36 * -t, --field-separator
37 *
38 * option, that uses a special separator character and don't pad with spaces,
39 * replacing all occurances of this separator in symbol names (and other
40 * output) with a '.' character, that thus it's the only non valid separator.
41*/
42static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
43{
44 int n;
45 va_list ap;
46
47 va_start(ap, fmt);
48 n = vsnprintf(bf, size, fmt, ap);
49 if (symbol_conf.field_sep && n > 0) {
50 char *sep = bf;
51
52 while (1) {
53 sep = strchr(sep, *symbol_conf.field_sep);
54 if (sep == NULL)
55 break;
56 *sep = '.';
57 }
58 }
59 va_end(ap);
60
61 if (n >= (int)size)
62 return size - 1;
63 return n;
64}
65
66static int64_t cmp_null(const void *l, const void *r)
67{
68 if (!l && !r)
69 return 0;
70 else if (!l)
71 return -1;
72 else
73 return 1;
74}
75
76/* --sort pid */
77
78static int64_t
79sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
80{
81 return right->thread->tid - left->thread->tid;
82}
83
84static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
85 size_t size, unsigned int width)
86{
87 const char *comm = thread__comm_str(he->thread);
88
89 width = max(7U, width) - 8;
90 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
91 width, width, comm ?: "");
92}
93
94static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
95{
96 const struct thread *th = arg;
97
98 if (type != HIST_FILTER__THREAD)
99 return -1;
100
101 return th && he->thread != th;
102}
103
104struct sort_entry sort_thread = {
105 .se_header = " Pid:Command",
106 .se_cmp = sort__thread_cmp,
107 .se_snprintf = hist_entry__thread_snprintf,
108 .se_filter = hist_entry__thread_filter,
109 .se_width_idx = HISTC_THREAD,
110};
111
112/* --sort comm */
113
114/*
115 * We can't use pointer comparison in functions below,
116 * because it gives different results based on pointer
117 * values, which could break some sorting assumptions.
118 */
119static int64_t
120sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
121{
122 return strcmp(comm__str(right->comm), comm__str(left->comm));
123}
124
125static int64_t
126sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
127{
128 return strcmp(comm__str(right->comm), comm__str(left->comm));
129}
130
131static int64_t
132sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
133{
134 return strcmp(comm__str(right->comm), comm__str(left->comm));
135}
136
137static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
138 size_t size, unsigned int width)
139{
140 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
141}
142
143struct sort_entry sort_comm = {
144 .se_header = "Command",
145 .se_cmp = sort__comm_cmp,
146 .se_collapse = sort__comm_collapse,
147 .se_sort = sort__comm_sort,
148 .se_snprintf = hist_entry__comm_snprintf,
149 .se_filter = hist_entry__thread_filter,
150 .se_width_idx = HISTC_COMM,
151};
152
153/* --sort dso */
154
155static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
156{
157 struct dso *dso_l = map_l ? map_l->dso : NULL;
158 struct dso *dso_r = map_r ? map_r->dso : NULL;
159 const char *dso_name_l, *dso_name_r;
160
161 if (!dso_l || !dso_r)
162 return cmp_null(dso_r, dso_l);
163
164 if (verbose > 0) {
165 dso_name_l = dso_l->long_name;
166 dso_name_r = dso_r->long_name;
167 } else {
168 dso_name_l = dso_l->short_name;
169 dso_name_r = dso_r->short_name;
170 }
171
172 return strcmp(dso_name_l, dso_name_r);
173}
174
175static int64_t
176sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
177{
178 return _sort__dso_cmp(right->ms.map, left->ms.map);
179}
180
181static int _hist_entry__dso_snprintf(struct map *map, char *bf,
182 size_t size, unsigned int width)
183{
184 if (map && map->dso) {
185 const char *dso_name = verbose > 0 ? map->dso->long_name :
186 map->dso->short_name;
187 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
188 }
189
190 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
191}
192
193static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
194 size_t size, unsigned int width)
195{
196 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
197}
198
199static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
200{
201 const struct dso *dso = arg;
202
203 if (type != HIST_FILTER__DSO)
204 return -1;
205
206 return dso && (!he->ms.map || he->ms.map->dso != dso);
207}
208
209struct sort_entry sort_dso = {
210 .se_header = "Shared Object",
211 .se_cmp = sort__dso_cmp,
212 .se_snprintf = hist_entry__dso_snprintf,
213 .se_filter = hist_entry__dso_filter,
214 .se_width_idx = HISTC_DSO,
215};
216
217/* --sort symbol */
218
219static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
220{
221 return (int64_t)(right_ip - left_ip);
222}
223
224static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
225{
226 if (!sym_l || !sym_r)
227 return cmp_null(sym_l, sym_r);
228
229 if (sym_l == sym_r)
230 return 0;
231
232 if (sym_l->inlined || sym_r->inlined)
233 return strcmp(sym_l->name, sym_r->name);
234
235 if (sym_l->start != sym_r->start)
236 return (int64_t)(sym_r->start - sym_l->start);
237
238 return (int64_t)(sym_r->end - sym_l->end);
239}
240
241static int64_t
242sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
243{
244 int64_t ret;
245
246 if (!left->ms.sym && !right->ms.sym)
247 return _sort__addr_cmp(left->ip, right->ip);
248
249 /*
250 * comparing symbol address alone is not enough since it's a
251 * relative address within a dso.
252 */
253 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
254 ret = sort__dso_cmp(left, right);
255 if (ret != 0)
256 return ret;
257 }
258
259 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
260}
261
262static int64_t
263sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
264{
265 if (!left->ms.sym || !right->ms.sym)
266 return cmp_null(left->ms.sym, right->ms.sym);
267
268 return strcmp(right->ms.sym->name, left->ms.sym->name);
269}
270
271static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
272 u64 ip, char level, char *bf, size_t size,
273 unsigned int width)
274{
275 size_t ret = 0;
276
277 if (verbose > 0) {
278 char o = map ? dso__symtab_origin(map->dso) : '!';
279 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
280 BITS_PER_LONG / 4 + 2, ip, o);
281 }
282
283 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
284 if (sym && map) {
285 if (map->type == MAP__VARIABLE) {
286 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
287 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
288 ip - map->unmap_ip(map, sym->start));
289 } else {
290 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
291 width - ret,
292 sym->name);
293 if (sym->inlined)
294 ret += repsep_snprintf(bf + ret, size - ret,
295 " (inlined)");
296 }
297 } else {
298 size_t len = BITS_PER_LONG / 4;
299 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
300 len, ip);
301 }
302
303 return ret;
304}
305
306static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
307 size_t size, unsigned int width)
308{
309 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
310 he->level, bf, size, width);
311}
312
313static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
314{
315 const char *sym = arg;
316
317 if (type != HIST_FILTER__SYMBOL)
318 return -1;
319
320 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
321}
322
323struct sort_entry sort_sym = {
324 .se_header = "Symbol",
325 .se_cmp = sort__sym_cmp,
326 .se_sort = sort__sym_sort,
327 .se_snprintf = hist_entry__sym_snprintf,
328 .se_filter = hist_entry__sym_filter,
329 .se_width_idx = HISTC_SYMBOL,
330};
331
332/* --sort srcline */
333
334char *hist_entry__get_srcline(struct hist_entry *he)
335{
336 struct map *map = he->ms.map;
337
338 if (!map)
339 return SRCLINE_UNKNOWN;
340
341 return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
342 he->ms.sym, true, true, he->ip);
343}
344
345static int64_t
346sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
347{
348 if (!left->srcline)
349 left->srcline = hist_entry__get_srcline(left);
350 if (!right->srcline)
351 right->srcline = hist_entry__get_srcline(right);
352
353 return strcmp(right->srcline, left->srcline);
354}
355
356static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
357 size_t size, unsigned int width)
358{
359 if (!he->srcline)
360 he->srcline = hist_entry__get_srcline(he);
361
362 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
363}
364
365struct sort_entry sort_srcline = {
366 .se_header = "Source:Line",
367 .se_cmp = sort__srcline_cmp,
368 .se_snprintf = hist_entry__srcline_snprintf,
369 .se_width_idx = HISTC_SRCLINE,
370};
371
372/* --sort srcline_from */
373
374static int64_t
375sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
376{
377 if (!left->branch_info->srcline_from) {
378 struct map *map = left->branch_info->from.map;
379 if (!map)
380 left->branch_info->srcline_from = SRCLINE_UNKNOWN;
381 else
382 left->branch_info->srcline_from = get_srcline(map->dso,
383 map__rip_2objdump(map,
384 left->branch_info->from.al_addr),
385 left->branch_info->from.sym,
386 true, true,
387 left->branch_info->from.al_addr);
388 }
389 if (!right->branch_info->srcline_from) {
390 struct map *map = right->branch_info->from.map;
391 if (!map)
392 right->branch_info->srcline_from = SRCLINE_UNKNOWN;
393 else
394 right->branch_info->srcline_from = get_srcline(map->dso,
395 map__rip_2objdump(map,
396 right->branch_info->from.al_addr),
397 right->branch_info->from.sym,
398 true, true,
399 right->branch_info->from.al_addr);
400 }
401 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
402}
403
404static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
405 size_t size, unsigned int width)
406{
407 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
408}
409
410struct sort_entry sort_srcline_from = {
411 .se_header = "From Source:Line",
412 .se_cmp = sort__srcline_from_cmp,
413 .se_snprintf = hist_entry__srcline_from_snprintf,
414 .se_width_idx = HISTC_SRCLINE_FROM,
415};
416
417/* --sort srcline_to */
418
419static int64_t
420sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
421{
422 if (!left->branch_info->srcline_to) {
423 struct map *map = left->branch_info->to.map;
424 if (!map)
425 left->branch_info->srcline_to = SRCLINE_UNKNOWN;
426 else
427 left->branch_info->srcline_to = get_srcline(map->dso,
428 map__rip_2objdump(map,
429 left->branch_info->to.al_addr),
430 left->branch_info->from.sym,
431 true, true,
432 left->branch_info->to.al_addr);
433 }
434 if (!right->branch_info->srcline_to) {
435 struct map *map = right->branch_info->to.map;
436 if (!map)
437 right->branch_info->srcline_to = SRCLINE_UNKNOWN;
438 else
439 right->branch_info->srcline_to = get_srcline(map->dso,
440 map__rip_2objdump(map,
441 right->branch_info->to.al_addr),
442 right->branch_info->to.sym,
443 true, true,
444 right->branch_info->to.al_addr);
445 }
446 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
447}
448
449static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
450 size_t size, unsigned int width)
451{
452 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
453}
454
455struct sort_entry sort_srcline_to = {
456 .se_header = "To Source:Line",
457 .se_cmp = sort__srcline_to_cmp,
458 .se_snprintf = hist_entry__srcline_to_snprintf,
459 .se_width_idx = HISTC_SRCLINE_TO,
460};
461
462/* --sort srcfile */
463
464static char no_srcfile[1];
465
466static char *hist_entry__get_srcfile(struct hist_entry *e)
467{
468 char *sf, *p;
469 struct map *map = e->ms.map;
470
471 if (!map)
472 return no_srcfile;
473
474 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
475 e->ms.sym, false, true, true, e->ip);
476 if (!strcmp(sf, SRCLINE_UNKNOWN))
477 return no_srcfile;
478 p = strchr(sf, ':');
479 if (p && *sf) {
480 *p = 0;
481 return sf;
482 }
483 free(sf);
484 return no_srcfile;
485}
486
487static int64_t
488sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
489{
490 if (!left->srcfile)
491 left->srcfile = hist_entry__get_srcfile(left);
492 if (!right->srcfile)
493 right->srcfile = hist_entry__get_srcfile(right);
494
495 return strcmp(right->srcfile, left->srcfile);
496}
497
498static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
499 size_t size, unsigned int width)
500{
501 if (!he->srcfile)
502 he->srcfile = hist_entry__get_srcfile(he);
503
504 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
505}
506
507struct sort_entry sort_srcfile = {
508 .se_header = "Source File",
509 .se_cmp = sort__srcfile_cmp,
510 .se_snprintf = hist_entry__srcfile_snprintf,
511 .se_width_idx = HISTC_SRCFILE,
512};
513
514/* --sort parent */
515
516static int64_t
517sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
518{
519 struct symbol *sym_l = left->parent;
520 struct symbol *sym_r = right->parent;
521
522 if (!sym_l || !sym_r)
523 return cmp_null(sym_l, sym_r);
524
525 return strcmp(sym_r->name, sym_l->name);
526}
527
528static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
529 size_t size, unsigned int width)
530{
531 return repsep_snprintf(bf, size, "%-*.*s", width, width,
532 he->parent ? he->parent->name : "[other]");
533}
534
535struct sort_entry sort_parent = {
536 .se_header = "Parent symbol",
537 .se_cmp = sort__parent_cmp,
538 .se_snprintf = hist_entry__parent_snprintf,
539 .se_width_idx = HISTC_PARENT,
540};
541
542/* --sort cpu */
543
544static int64_t
545sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
546{
547 return right->cpu - left->cpu;
548}
549
550static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
551 size_t size, unsigned int width)
552{
553 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
554}
555
556struct sort_entry sort_cpu = {
557 .se_header = "CPU",
558 .se_cmp = sort__cpu_cmp,
559 .se_snprintf = hist_entry__cpu_snprintf,
560 .se_width_idx = HISTC_CPU,
561};
562
563/* --sort cgroup_id */
564
565static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
566{
567 return (int64_t)(right_dev - left_dev);
568}
569
570static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
571{
572 return (int64_t)(right_ino - left_ino);
573}
574
575static int64_t
576sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
577{
578 int64_t ret;
579
580 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
581 if (ret != 0)
582 return ret;
583
584 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
585 left->cgroup_id.ino);
586}
587
588static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
589 char *bf, size_t size,
590 unsigned int width __maybe_unused)
591{
592 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
593 he->cgroup_id.ino);
594}
595
596struct sort_entry sort_cgroup_id = {
597 .se_header = "cgroup id (dev/inode)",
598 .se_cmp = sort__cgroup_id_cmp,
599 .se_snprintf = hist_entry__cgroup_id_snprintf,
600 .se_width_idx = HISTC_CGROUP_ID,
601};
602
603/* --sort socket */
604
605static int64_t
606sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
607{
608 return right->socket - left->socket;
609}
610
611static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
612 size_t size, unsigned int width)
613{
614 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
615}
616
617static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
618{
619 int sk = *(const int *)arg;
620
621 if (type != HIST_FILTER__SOCKET)
622 return -1;
623
624 return sk >= 0 && he->socket != sk;
625}
626
627struct sort_entry sort_socket = {
628 .se_header = "Socket",
629 .se_cmp = sort__socket_cmp,
630 .se_snprintf = hist_entry__socket_snprintf,
631 .se_filter = hist_entry__socket_filter,
632 .se_width_idx = HISTC_SOCKET,
633};
634
635/* --sort trace */
636
637static char *get_trace_output(struct hist_entry *he)
638{
639 struct trace_seq seq;
640 struct perf_evsel *evsel;
641 struct pevent_record rec = {
642 .data = he->raw_data,
643 .size = he->raw_size,
644 };
645
646 evsel = hists_to_evsel(he->hists);
647
648 trace_seq_init(&seq);
649 if (symbol_conf.raw_trace) {
650 pevent_print_fields(&seq, he->raw_data, he->raw_size,
651 evsel->tp_format);
652 } else {
653 pevent_event_info(&seq, evsel->tp_format, &rec);
654 }
655 /*
656 * Trim the buffer, it starts at 4KB and we're not going to
657 * add anything more to this buffer.
658 */
659 return realloc(seq.buffer, seq.len + 1);
660}
661
662static int64_t
663sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
664{
665 struct perf_evsel *evsel;
666
667 evsel = hists_to_evsel(left->hists);
668 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
669 return 0;
670
671 if (left->trace_output == NULL)
672 left->trace_output = get_trace_output(left);
673 if (right->trace_output == NULL)
674 right->trace_output = get_trace_output(right);
675
676 return strcmp(right->trace_output, left->trace_output);
677}
678
679static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
680 size_t size, unsigned int width)
681{
682 struct perf_evsel *evsel;
683
684 evsel = hists_to_evsel(he->hists);
685 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
686 return scnprintf(bf, size, "%-.*s", width, "N/A");
687
688 if (he->trace_output == NULL)
689 he->trace_output = get_trace_output(he);
690 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
691}
692
693struct sort_entry sort_trace = {
694 .se_header = "Trace output",
695 .se_cmp = sort__trace_cmp,
696 .se_snprintf = hist_entry__trace_snprintf,
697 .se_width_idx = HISTC_TRACE,
698};
699
700/* sort keys for branch stacks */
701
702static int64_t
703sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
704{
705 if (!left->branch_info || !right->branch_info)
706 return cmp_null(left->branch_info, right->branch_info);
707
708 return _sort__dso_cmp(left->branch_info->from.map,
709 right->branch_info->from.map);
710}
711
712static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
713 size_t size, unsigned int width)
714{
715 if (he->branch_info)
716 return _hist_entry__dso_snprintf(he->branch_info->from.map,
717 bf, size, width);
718 else
719 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
720}
721
722static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
723 const void *arg)
724{
725 const struct dso *dso = arg;
726
727 if (type != HIST_FILTER__DSO)
728 return -1;
729
730 return dso && (!he->branch_info || !he->branch_info->from.map ||
731 he->branch_info->from.map->dso != dso);
732}
733
734static int64_t
735sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
736{
737 if (!left->branch_info || !right->branch_info)
738 return cmp_null(left->branch_info, right->branch_info);
739
740 return _sort__dso_cmp(left->branch_info->to.map,
741 right->branch_info->to.map);
742}
743
744static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
745 size_t size, unsigned int width)
746{
747 if (he->branch_info)
748 return _hist_entry__dso_snprintf(he->branch_info->to.map,
749 bf, size, width);
750 else
751 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
752}
753
754static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
755 const void *arg)
756{
757 const struct dso *dso = arg;
758
759 if (type != HIST_FILTER__DSO)
760 return -1;
761
762 return dso && (!he->branch_info || !he->branch_info->to.map ||
763 he->branch_info->to.map->dso != dso);
764}
765
766static int64_t
767sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
768{
769 struct addr_map_symbol *from_l = &left->branch_info->from;
770 struct addr_map_symbol *from_r = &right->branch_info->from;
771
772 if (!left->branch_info || !right->branch_info)
773 return cmp_null(left->branch_info, right->branch_info);
774
775 from_l = &left->branch_info->from;
776 from_r = &right->branch_info->from;
777
778 if (!from_l->sym && !from_r->sym)
779 return _sort__addr_cmp(from_l->addr, from_r->addr);
780
781 return _sort__sym_cmp(from_l->sym, from_r->sym);
782}
783
784static int64_t
785sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
786{
787 struct addr_map_symbol *to_l, *to_r;
788
789 if (!left->branch_info || !right->branch_info)
790 return cmp_null(left->branch_info, right->branch_info);
791
792 to_l = &left->branch_info->to;
793 to_r = &right->branch_info->to;
794
795 if (!to_l->sym && !to_r->sym)
796 return _sort__addr_cmp(to_l->addr, to_r->addr);
797
798 return _sort__sym_cmp(to_l->sym, to_r->sym);
799}
800
801static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
802 size_t size, unsigned int width)
803{
804 if (he->branch_info) {
805 struct addr_map_symbol *from = &he->branch_info->from;
806
807 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
808 he->level, bf, size, width);
809 }
810
811 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
812}
813
814static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
815 size_t size, unsigned int width)
816{
817 if (he->branch_info) {
818 struct addr_map_symbol *to = &he->branch_info->to;
819
820 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
821 he->level, bf, size, width);
822 }
823
824 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
825}
826
827static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
828 const void *arg)
829{
830 const char *sym = arg;
831
832 if (type != HIST_FILTER__SYMBOL)
833 return -1;
834
835 return sym && !(he->branch_info && he->branch_info->from.sym &&
836 strstr(he->branch_info->from.sym->name, sym));
837}
838
839static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
840 const void *arg)
841{
842 const char *sym = arg;
843
844 if (type != HIST_FILTER__SYMBOL)
845 return -1;
846
847 return sym && !(he->branch_info && he->branch_info->to.sym &&
848 strstr(he->branch_info->to.sym->name, sym));
849}
850
851struct sort_entry sort_dso_from = {
852 .se_header = "Source Shared Object",
853 .se_cmp = sort__dso_from_cmp,
854 .se_snprintf = hist_entry__dso_from_snprintf,
855 .se_filter = hist_entry__dso_from_filter,
856 .se_width_idx = HISTC_DSO_FROM,
857};
858
859struct sort_entry sort_dso_to = {
860 .se_header = "Target Shared Object",
861 .se_cmp = sort__dso_to_cmp,
862 .se_snprintf = hist_entry__dso_to_snprintf,
863 .se_filter = hist_entry__dso_to_filter,
864 .se_width_idx = HISTC_DSO_TO,
865};
866
867struct sort_entry sort_sym_from = {
868 .se_header = "Source Symbol",
869 .se_cmp = sort__sym_from_cmp,
870 .se_snprintf = hist_entry__sym_from_snprintf,
871 .se_filter = hist_entry__sym_from_filter,
872 .se_width_idx = HISTC_SYMBOL_FROM,
873};
874
875struct sort_entry sort_sym_to = {
876 .se_header = "Target Symbol",
877 .se_cmp = sort__sym_to_cmp,
878 .se_snprintf = hist_entry__sym_to_snprintf,
879 .se_filter = hist_entry__sym_to_filter,
880 .se_width_idx = HISTC_SYMBOL_TO,
881};
882
883static int64_t
884sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
885{
886 unsigned char mp, p;
887
888 if (!left->branch_info || !right->branch_info)
889 return cmp_null(left->branch_info, right->branch_info);
890
891 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
892 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
893 return mp || p;
894}
895
896static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
897 size_t size, unsigned int width){
898 static const char *out = "N/A";
899
900 if (he->branch_info) {
901 if (he->branch_info->flags.predicted)
902 out = "N";
903 else if (he->branch_info->flags.mispred)
904 out = "Y";
905 }
906
907 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
908}
909
910static int64_t
911sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
912{
913 if (!left->branch_info || !right->branch_info)
914 return cmp_null(left->branch_info, right->branch_info);
915
916 return left->branch_info->flags.cycles -
917 right->branch_info->flags.cycles;
918}
919
920static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
921 size_t size, unsigned int width)
922{
923 if (!he->branch_info)
924 return scnprintf(bf, size, "%-.*s", width, "N/A");
925 if (he->branch_info->flags.cycles == 0)
926 return repsep_snprintf(bf, size, "%-*s", width, "-");
927 return repsep_snprintf(bf, size, "%-*hd", width,
928 he->branch_info->flags.cycles);
929}
930
931struct sort_entry sort_cycles = {
932 .se_header = "Basic Block Cycles",
933 .se_cmp = sort__cycles_cmp,
934 .se_snprintf = hist_entry__cycles_snprintf,
935 .se_width_idx = HISTC_CYCLES,
936};
937
938/* --sort daddr_sym */
939int64_t
940sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
941{
942 uint64_t l = 0, r = 0;
943
944 if (left->mem_info)
945 l = left->mem_info->daddr.addr;
946 if (right->mem_info)
947 r = right->mem_info->daddr.addr;
948
949 return (int64_t)(r - l);
950}
951
952static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
953 size_t size, unsigned int width)
954{
955 uint64_t addr = 0;
956 struct map *map = NULL;
957 struct symbol *sym = NULL;
958
959 if (he->mem_info) {
960 addr = he->mem_info->daddr.addr;
961 map = he->mem_info->daddr.map;
962 sym = he->mem_info->daddr.sym;
963 }
964 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
965 width);
966}
967
968int64_t
969sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
970{
971 uint64_t l = 0, r = 0;
972
973 if (left->mem_info)
974 l = left->mem_info->iaddr.addr;
975 if (right->mem_info)
976 r = right->mem_info->iaddr.addr;
977
978 return (int64_t)(r - l);
979}
980
981static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
982 size_t size, unsigned int width)
983{
984 uint64_t addr = 0;
985 struct map *map = NULL;
986 struct symbol *sym = NULL;
987
988 if (he->mem_info) {
989 addr = he->mem_info->iaddr.addr;
990 map = he->mem_info->iaddr.map;
991 sym = he->mem_info->iaddr.sym;
992 }
993 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
994 width);
995}
996
997static int64_t
998sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
999{
1000 struct map *map_l = NULL;
1001 struct map *map_r = NULL;
1002
1003 if (left->mem_info)
1004 map_l = left->mem_info->daddr.map;
1005 if (right->mem_info)
1006 map_r = right->mem_info->daddr.map;
1007
1008 return _sort__dso_cmp(map_l, map_r);
1009}
1010
1011static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1012 size_t size, unsigned int width)
1013{
1014 struct map *map = NULL;
1015
1016 if (he->mem_info)
1017 map = he->mem_info->daddr.map;
1018
1019 return _hist_entry__dso_snprintf(map, bf, size, width);
1020}
1021
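/*
 * Memory access attribute sort keys (locked, tlb, mem level, snoop): each
 * compares the corresponding perf_mem_data_src field, treating entries
 * without mem_info as "not available".
 */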
1022static int64_t
1023sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1024{
1025 union perf_mem_data_src data_src_l;
1026 union perf_mem_data_src data_src_r;
1027
1028 if (left->mem_info)
1029 data_src_l = left->mem_info->data_src;
1030 else
1031 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1032
1033 if (right->mem_info)
1034 data_src_r = right->mem_info->data_src;
1035 else
1036 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1037
1038 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1039}
1040
1041static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1042 size_t size, unsigned int width)
1043{
1044 char out[10];
1045
1046 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1047 return repsep_snprintf(bf, size, "%.*s", width, out);
1048}
1049
1050static int64_t
1051sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1052{
1053 union perf_mem_data_src data_src_l;
1054 union perf_mem_data_src data_src_r;
1055
1056 if (left->mem_info)
1057 data_src_l = left->mem_info->data_src;
1058 else
1059 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1060
1061 if (right->mem_info)
1062 data_src_r = right->mem_info->data_src;
1063 else
1064 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1065
1066 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1067}
1068
1069static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1070 size_t size, unsigned int width)
1071{
1072 char out[64];
1073
1074 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1075 return repsep_snprintf(bf, size, "%-*s", width, out);
1076}
1077
1078static int64_t
1079sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1080{
1081 union perf_mem_data_src data_src_l;
1082 union perf_mem_data_src data_src_r;
1083
1084 if (left->mem_info)
1085 data_src_l = left->mem_info->data_src;
1086 else
1087 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1088
1089 if (right->mem_info)
1090 data_src_r = right->mem_info->data_src;
1091 else
1092 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1093
1094 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1095}
1096
1097static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1098 size_t size, unsigned int width)
1099{
1100 char out[64];
1101
1102 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1103 return repsep_snprintf(bf, size, "%-*s", width, out);
1104}
1105
1106static int64_t
1107sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1108{
1109 union perf_mem_data_src data_src_l;
1110 union perf_mem_data_src data_src_r;
1111
1112 if (left->mem_info)
1113 data_src_l = left->mem_info->data_src;
1114 else
1115 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1116
1117 if (right->mem_info)
1118 data_src_r = right->mem_info->data_src;
1119 else
1120 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1121
1122 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1123}
1124
1125static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1126 size_t size, unsigned int width)
1127{
1128 char out[64];
1129
1130 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1131 return repsep_snprintf(bf, size, "%-*s", width, out);
1132}
1133
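/*
 * --sort dcacheline: group samples that touch the same data cacheline.
 * Entries are keyed on cpumode, the backing map (maj/min/ino/generation),
 * the owning pid for anonymous userspace mappings, and finally the
 * cacheline-aligned address.
 */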
1134int64_t
1135sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1136{
1137 u64 l, r;
1138 struct map *l_map, *r_map;
1139
1140 if (!left->mem_info) return -1;
1141 if (!right->mem_info) return 1;
1142
1143 /* group event types together */
1144 if (left->cpumode > right->cpumode) return -1;
1145 if (left->cpumode < right->cpumode) return 1;
1146
1147 l_map = left->mem_info->daddr.map;
1148 r_map = right->mem_info->daddr.map;
1149
1150 /* if both are NULL, jump to sort on al_addr instead */
1151 if (!l_map && !r_map)
1152 goto addr;
1153
1154 if (!l_map) return -1;
1155 if (!r_map) return 1;
1156
1157 if (l_map->maj > r_map->maj) return -1;
1158 if (l_map->maj < r_map->maj) return 1;
1159
1160 if (l_map->min > r_map->min) return -1;
1161 if (l_map->min < r_map->min) return 1;
1162
1163 if (l_map->ino > r_map->ino) return -1;
1164 if (l_map->ino < r_map->ino) return 1;
1165
1166 if (l_map->ino_generation > r_map->ino_generation) return -1;
1167 if (l_map->ino_generation < r_map->ino_generation) return 1;
1168
1169 /*
1170 * Addresses with no major/minor numbers are assumed to be
1171 * anonymous in userspace. Sort those on pid then address.
1172 *
1173 * The kernel and non-zero major/minor mapped areas are
1174 * assumed to be unity mapped. Sort those on address.
1175 */
1176
1177 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1178 (!(l_map->flags & MAP_SHARED)) &&
1179 !l_map->maj && !l_map->min && !l_map->ino &&
1180 !l_map->ino_generation) {
1181 /* userspace anonymous */
1182
1183 if (left->thread->pid_ > right->thread->pid_) return -1;
1184 if (left->thread->pid_ < right->thread->pid_) return 1;
1185 }
1186
1187addr:
1188 /* al_addr does all the right addr - start + offset calculations */
1189 l = cl_address(left->mem_info->daddr.al_addr);
1190 r = cl_address(right->mem_info->daddr.al_addr);
1191
1192 if (l > r) return -1;
1193 if (l < r) return 1;
1194
1195 return 0;
1196}
1197
1198static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1199 size_t size, unsigned int width)
1200{
1201
1202 uint64_t addr = 0;
1203 struct map *map = NULL;
1204 struct symbol *sym = NULL;
1205 char level = he->level;
1206
1207 if (he->mem_info) {
1208 addr = cl_address(he->mem_info->daddr.al_addr);
1209 map = he->mem_info->daddr.map;
1210 sym = he->mem_info->daddr.sym;
1211
1212 /* print [s] for shared data mmaps */
1213 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1214 map && (map->type == MAP__VARIABLE) &&
1215 (map->flags & MAP_SHARED) &&
1216 (map->maj || map->min || map->ino ||
1217 map->ino_generation))
1218 level = 's';
1219 else if (!map)
1220 level = 'X';
1221 }
1222 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1223 width);
1224}
1225
1226struct sort_entry sort_mispredict = {
1227 .se_header = "Branch Mispredicted",
1228 .se_cmp = sort__mispredict_cmp,
1229 .se_snprintf = hist_entry__mispredict_snprintf,
1230 .se_width_idx = HISTC_MISPREDICT,
1231};
1232
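/*
 * Average weight of this entry's samples: total weight divided by the
 * number of events, used by the local_weight sort key.
 */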
1233static u64 he_weight(struct hist_entry *he)
1234{
1235 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1236}
1237
1238static int64_t
1239sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1240{
1241 return he_weight(left) - he_weight(right);
1242}
1243
1244static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1245 size_t size, unsigned int width)
1246{
1247 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1248}
1249
1250struct sort_entry sort_local_weight = {
1251 .se_header = "Local Weight",
1252 .se_cmp = sort__local_weight_cmp,
1253 .se_snprintf = hist_entry__local_weight_snprintf,
1254 .se_width_idx = HISTC_LOCAL_WEIGHT,
1255};
1256
1257static int64_t
1258sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1259{
1260 return left->stat.weight - right->stat.weight;
1261}
1262
1263static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1264 size_t size, unsigned int width)
1265{
1266 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1267}
1268
1269struct sort_entry sort_global_weight = {
1270 .se_header = "Weight",
1271 .se_cmp = sort__global_weight_cmp,
1272 .se_snprintf = hist_entry__global_weight_snprintf,
1273 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1274};
1275
1276struct sort_entry sort_mem_daddr_sym = {
1277 .se_header = "Data Symbol",
1278 .se_cmp = sort__daddr_cmp,
1279 .se_snprintf = hist_entry__daddr_snprintf,
1280 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1281};
1282
1283struct sort_entry sort_mem_iaddr_sym = {
1284 .se_header = "Code Symbol",
1285 .se_cmp = sort__iaddr_cmp,
1286 .se_snprintf = hist_entry__iaddr_snprintf,
1287 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1288};
1289
1290struct sort_entry sort_mem_daddr_dso = {
1291 .se_header = "Data Object",
1292 .se_cmp = sort__dso_daddr_cmp,
1293 .se_snprintf = hist_entry__dso_daddr_snprintf,
1294 .se_width_idx = HISTC_MEM_DADDR_DSO,
1295};
1296
1297struct sort_entry sort_mem_locked = {
1298 .se_header = "Locked",
1299 .se_cmp = sort__locked_cmp,
1300 .se_snprintf = hist_entry__locked_snprintf,
1301 .se_width_idx = HISTC_MEM_LOCKED,
1302};
1303
1304struct sort_entry sort_mem_tlb = {
1305 .se_header = "TLB access",
1306 .se_cmp = sort__tlb_cmp,
1307 .se_snprintf = hist_entry__tlb_snprintf,
1308 .se_width_idx = HISTC_MEM_TLB,
1309};
1310
1311struct sort_entry sort_mem_lvl = {
1312 .se_header = "Memory access",
1313 .se_cmp = sort__lvl_cmp,
1314 .se_snprintf = hist_entry__lvl_snprintf,
1315 .se_width_idx = HISTC_MEM_LVL,
1316};
1317
1318struct sort_entry sort_mem_snoop = {
1319 .se_header = "Snoop",
1320 .se_cmp = sort__snoop_cmp,
1321 .se_snprintf = hist_entry__snoop_snprintf,
1322 .se_width_idx = HISTC_MEM_SNOOP,
1323};
1324
1325struct sort_entry sort_mem_dcacheline = {
1326 .se_header = "Data Cacheline",
1327 .se_cmp = sort__dcacheline_cmp,
1328 .se_snprintf = hist_entry__dcacheline_snprintf,
1329 .se_width_idx = HISTC_MEM_DCACHELINE,
1330};
1331
1332static int64_t
1333sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1334{
1335 uint64_t l = 0, r = 0;
1336
1337 if (left->mem_info)
1338 l = left->mem_info->daddr.phys_addr;
1339 if (right->mem_info)
1340 r = right->mem_info->daddr.phys_addr;
1341
1342 return (int64_t)(r - l);
1343}
1344
1345static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1346 size_t size, unsigned int width)
1347{
1348 uint64_t addr = 0;
1349 size_t ret = 0;
1350 size_t len = BITS_PER_LONG / 4;
1351
 /* entries without mem_info have no physical address to print */
 if (!he->mem_info)
 return repsep_snprintf(bf, size, "%-*s", width, "N/A");

1352 addr = he->mem_info->daddr.phys_addr;
1353
1354 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1355
1356 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1357
1358 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1359
1360 if (ret > width)
1361 bf[width] = '\0';
1362
1363 return width;
1364}
1365
1366struct sort_entry sort_mem_phys_daddr = {
1367 .se_header = "Data Physical Address",
1368 .se_cmp = sort__phys_daddr_cmp,
1369 .se_snprintf = hist_entry__phys_daddr_snprintf,
1370 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1371};
1372
1373static int64_t
1374sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1375{
1376 if (!left->branch_info || !right->branch_info)
1377 return cmp_null(left->branch_info, right->branch_info);
1378
1379 return left->branch_info->flags.abort !=
1380 right->branch_info->flags.abort;
1381}
1382
1383static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1384 size_t size, unsigned int width)
1385{
1386 static const char *out = "N/A";
1387
1388 if (he->branch_info) {
1389 if (he->branch_info->flags.abort)
1390 out = "A";
1391 else
1392 out = ".";
1393 }
1394
1395 return repsep_snprintf(bf, size, "%-*s", width, out);
1396}
1397
1398struct sort_entry sort_abort = {
1399 .se_header = "Transaction abort",
1400 .se_cmp = sort__abort_cmp,
1401 .se_snprintf = hist_entry__abort_snprintf,
1402 .se_width_idx = HISTC_ABORT,
1403};
1404
1405static int64_t
1406sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1407{
1408 if (!left->branch_info || !right->branch_info)
1409 return cmp_null(left->branch_info, right->branch_info);
1410
1411 return left->branch_info->flags.in_tx !=
1412 right->branch_info->flags.in_tx;
1413}
1414
1415static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1416 size_t size, unsigned int width)
1417{
1418 static const char *out = "N/A";
1419
1420 if (he->branch_info) {
1421 if (he->branch_info->flags.in_tx)
1422 out = "T";
1423 else
1424 out = ".";
1425 }
1426
1427 return repsep_snprintf(bf, size, "%-*s", width, out);
1428}
1429
1430struct sort_entry sort_in_tx = {
1431 .se_header = "Branch in transaction",
1432 .se_cmp = sort__in_tx_cmp,
1433 .se_snprintf = hist_entry__in_tx_snprintf,
1434 .se_width_idx = HISTC_IN_TX,
1435};
1436
1437static int64_t
1438sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1439{
1440 return left->transaction - right->transaction;
1441}
1442
1443static inline char *add_str(char *p, const char *str)
1444{
1445 strcpy(p, str);
1446 return p + strlen(str);
1447}
1448
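/*
 * Transaction flag bits and the strings used to print them. Entries with
 * skip_for_len set are not counted by hist_entry__transaction_len() when
 * sizing the column.
 */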
1449static struct txbit {
1450 unsigned flag;
1451 const char *name;
1452 int skip_for_len;
1453} txbits[] = {
1454 { PERF_TXN_ELISION, "EL ", 0 },
1455 { PERF_TXN_TRANSACTION, "TX ", 1 },
1456 { PERF_TXN_SYNC, "SYNC ", 1 },
1457 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1458 { PERF_TXN_RETRY, "RETRY ", 0 },
1459 { PERF_TXN_CONFLICT, "CON ", 0 },
1460 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1461 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1462 { 0, NULL, 0 }
1463};
1464
1465int hist_entry__transaction_len(void)
1466{
1467 int i;
1468 int len = 0;
1469
1470 for (i = 0; txbits[i].name; i++) {
1471 if (!txbits[i].skip_for_len)
1472 len += strlen(txbits[i].name);
1473 }
1474 len += 4; /* :XX<space> */
1475 return len;
1476}
1477
1478static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1479 size_t size, unsigned int width)
1480{
1481 u64 t = he->transaction;
1482 char buf[128];
1483 char *p = buf;
1484 int i;
1485
1486 buf[0] = 0;
1487 for (i = 0; txbits[i].name; i++)
1488 if (txbits[i].flag & t)
1489 p = add_str(p, txbits[i].name);
1490 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1491 p = add_str(p, "NEITHER ");
1492 if (t & PERF_TXN_ABORT_MASK) {
1493 sprintf(p, ":%" PRIx64,
1494 (t & PERF_TXN_ABORT_MASK) >>
1495 PERF_TXN_ABORT_SHIFT);
1496 p += strlen(p);
1497 }
1498
1499 return repsep_snprintf(bf, size, "%-*s", width, buf);
1500}
1501
1502struct sort_entry sort_transaction = {
1503 .se_header = "Transaction ",
1504 .se_cmp = sort__transaction_cmp,
1505 .se_snprintf = hist_entry__transaction_snprintf,
1506 .se_width_idx = HISTC_TRANSACTION,
1507};
1508
1509/* --sort symbol_size */
1510
1511static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1512{
1513 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1514 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1515
1516 return size_l < size_r ? -1 :
1517 size_l == size_r ? 0 : 1;
1518}
1519
1520static int64_t
1521sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1522{
1523 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1524}
1525
1526static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1527 size_t bf_size, unsigned int width)
1528{
1529 if (sym)
1530 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1531
1532 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1533}
1534
1535static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1536 size_t size, unsigned int width)
1537{
1538 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1539}
1540
1541struct sort_entry sort_sym_size = {
1542 .se_header = "Symbol size",
1543 .se_cmp = sort__sym_size_cmp,
1544 .se_snprintf = hist_entry__sym_size_snprintf,
1545 .se_width_idx = HISTC_SYM_SIZE,
1546};
1547
1548/* --sort dso_size */
1549
1550static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1551{
1552 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1553 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1554
1555 return size_l < size_r ? -1 :
1556 size_l == size_r ? 0 : 1;
1557}
1558
1559static int64_t
1560sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1561{
1562 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1563}
1564
1565static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1566 size_t bf_size, unsigned int width)
1567{
1568 if (map && map->dso)
1569 return repsep_snprintf(bf, bf_size, "%*d", width,
1570 map__size(map));
1571
1572 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1573}
1574
1575static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1576 size_t size, unsigned int width)
1577{
1578 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1579}
1580
1581struct sort_entry sort_dso_size = {
1582 .se_header = "DSO size",
1583 .se_cmp = sort__dso_size_cmp,
1584 .se_snprintf = hist_entry__dso_size_snprintf,
1585 .se_width_idx = HISTC_DSO_SIZE,
1586};
1587
1588
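/*
 * Tables mapping --sort/--fields key names to their sort_entry
 * implementations. 'taken' marks a dimension that has already been added,
 * so repeated keys are only registered once.
 */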
1589struct sort_dimension {
1590 const char *name;
1591 struct sort_entry *entry;
1592 int taken;
1593};
1594
1595#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1596
1597static struct sort_dimension common_sort_dimensions[] = {
1598 DIM(SORT_PID, "pid", sort_thread),
1599 DIM(SORT_COMM, "comm", sort_comm),
1600 DIM(SORT_DSO, "dso", sort_dso),
1601 DIM(SORT_SYM, "symbol", sort_sym),
1602 DIM(SORT_PARENT, "parent", sort_parent),
1603 DIM(SORT_CPU, "cpu", sort_cpu),
1604 DIM(SORT_SOCKET, "socket", sort_socket),
1605 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1606 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1607 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1608 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1609 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1610 DIM(SORT_TRACE, "trace", sort_trace),
1611 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1612 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1613 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1614};
1615
1616#undef DIM
1617
1618#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1619
1620static struct sort_dimension bstack_sort_dimensions[] = {
1621 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1622 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1623 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1624 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1625 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1626 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1627 DIM(SORT_ABORT, "abort", sort_abort),
1628 DIM(SORT_CYCLES, "cycles", sort_cycles),
1629 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1630 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1631};
1632
1633#undef DIM
1634
1635#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1636
1637static struct sort_dimension memory_sort_dimensions[] = {
1638 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1639 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1640 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1641 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1642 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1643 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1644 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1645 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1646 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1647};
1648
1649#undef DIM
1650
1651struct hpp_dimension {
1652 const char *name;
1653 struct perf_hpp_fmt *fmt;
1654 int taken;
1655};
1656
1657#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1658
1659static struct hpp_dimension hpp_sort_dimensions[] = {
1660 DIM(PERF_HPP__OVERHEAD, "overhead"),
1661 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1662 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1663 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1664 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1665 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1666 DIM(PERF_HPP__SAMPLES, "sample"),
1667 DIM(PERF_HPP__PERIOD, "period"),
1668};
1669
1670#undef DIM
1671
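/*
 * Adapter exposing a classic sort_entry through the perf_hpp_fmt
 * interface so it can also be used as an output column.
 */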
1672struct hpp_sort_entry {
1673 struct perf_hpp_fmt hpp;
1674 struct sort_entry *se;
1675};
1676
1677void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1678{
1679 struct hpp_sort_entry *hse;
1680
1681 if (!perf_hpp__is_sort_entry(fmt))
1682 return;
1683
1684 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1685 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1686}
1687
1688static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1689 struct hists *hists, int line __maybe_unused,
1690 int *span __maybe_unused)
1691{
1692 struct hpp_sort_entry *hse;
1693 size_t len = fmt->user_len;
1694
1695 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1696
1697 if (!len)
1698 len = hists__col_len(hists, hse->se->se_width_idx);
1699
1700 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1701}
1702
1703static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1704 struct perf_hpp *hpp __maybe_unused,
1705 struct hists *hists)
1706{
1707 struct hpp_sort_entry *hse;
1708 size_t len = fmt->user_len;
1709
1710 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1711
1712 if (!len)
1713 len = hists__col_len(hists, hse->se->se_width_idx);
1714
1715 return len;
1716}
1717
1718static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1719 struct hist_entry *he)
1720{
1721 struct hpp_sort_entry *hse;
1722 size_t len = fmt->user_len;
1723
1724 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1725
1726 if (!len)
1727 len = hists__col_len(he->hists, hse->se->se_width_idx);
1728
1729 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1730}
1731
1732static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1733 struct hist_entry *a, struct hist_entry *b)
1734{
1735 struct hpp_sort_entry *hse;
1736
1737 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1738 return hse->se->se_cmp(a, b);
1739}
1740
1741static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1742 struct hist_entry *a, struct hist_entry *b)
1743{
1744 struct hpp_sort_entry *hse;
1745 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1746
1747 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1748 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1749 return collapse_fn(a, b);
1750}
1751
1752static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1753 struct hist_entry *a, struct hist_entry *b)
1754{
1755 struct hpp_sort_entry *hse;
1756 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1757
1758 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1759 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1760 return sort_fn(a, b);
1761}
1762
1763bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1764{
1765 return format->header == __sort__hpp_header;
1766}
1767
1768#define MK_SORT_ENTRY_CHK(key) \
1769bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1770{ \
1771 struct hpp_sort_entry *hse; \
1772 \
1773 if (!perf_hpp__is_sort_entry(fmt)) \
1774 return false; \
1775 \
1776 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1777 return hse->se == &sort_ ## key ; \
1778}
1779
1780MK_SORT_ENTRY_CHK(trace)
1781MK_SORT_ENTRY_CHK(srcline)
1782MK_SORT_ENTRY_CHK(srcfile)
1783MK_SORT_ENTRY_CHK(thread)
1784MK_SORT_ENTRY_CHK(comm)
1785MK_SORT_ENTRY_CHK(dso)
1786MK_SORT_ENTRY_CHK(sym)
1787
1788
1789static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1790{
1791 struct hpp_sort_entry *hse_a;
1792 struct hpp_sort_entry *hse_b;
1793
1794 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1795 return false;
1796
1797 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1798 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1799
1800 return hse_a->se == hse_b->se;
1801}
1802
1803static void hse_free(struct perf_hpp_fmt *fmt)
1804{
1805 struct hpp_sort_entry *hse;
1806
1807 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1808 free(hse);
1809}
1810
1811static struct hpp_sort_entry *
1812__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1813{
1814 struct hpp_sort_entry *hse;
1815
1816 hse = malloc(sizeof(*hse));
1817 if (hse == NULL) {
1818 pr_err("Memory allocation failed\n");
1819 return NULL;
1820 }
1821
1822 hse->se = sd->entry;
1823 hse->hpp.name = sd->entry->se_header;
1824 hse->hpp.header = __sort__hpp_header;
1825 hse->hpp.width = __sort__hpp_width;
1826 hse->hpp.entry = __sort__hpp_entry;
1827 hse->hpp.color = NULL;
1828
1829 hse->hpp.cmp = __sort__hpp_cmp;
1830 hse->hpp.collapse = __sort__hpp_collapse;
1831 hse->hpp.sort = __sort__hpp_sort;
1832 hse->hpp.equal = __sort__hpp_equal;
1833 hse->hpp.free = hse_free;
1834
1835 INIT_LIST_HEAD(&hse->hpp.list);
1836 INIT_LIST_HEAD(&hse->hpp.sort_list);
1837 hse->hpp.elide = false;
1838 hse->hpp.len = 0;
1839 hse->hpp.user_len = 0;
1840 hse->hpp.level = level;
1841
1842 return hse;
1843}
1844
1845static void hpp_free(struct perf_hpp_fmt *fmt)
1846{
1847 free(fmt);
1848}
1849
1850static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1851 int level)
1852{
1853 struct perf_hpp_fmt *fmt;
1854
1855 fmt = memdup(hd->fmt, sizeof(*fmt));
1856 if (fmt) {
1857 INIT_LIST_HEAD(&fmt->list);
1858 INIT_LIST_HEAD(&fmt->sort_list);
1859 fmt->free = hpp_free;
1860 fmt->level = level;
1861 }
1862
1863 return fmt;
1864}
1865
1866int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1867{
1868 struct perf_hpp_fmt *fmt;
1869 struct hpp_sort_entry *hse;
1870 int ret = -1;
1871 int r;
1872
1873 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1874 if (!perf_hpp__is_sort_entry(fmt))
1875 continue;
1876
1877 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1878 if (hse->se->se_filter == NULL)
1879 continue;
1880
1881 /*
1882 * A hist entry is filtered if any sort key in the hpp list
1883 * applies. Non-matching filter types are skipped.
1884 */
1885 r = hse->se->se_filter(he, type, arg);
1886 if (r >= 0) {
1887 if (ret < 0)
1888 ret = 0;
1889 ret |= r;
1890 }
1891 }
1892
1893 return ret;
1894}
1895
1896static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1897 struct perf_hpp_list *list,
1898 int level)
1899{
1900 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1901
1902 if (hse == NULL)
1903 return -1;
1904
1905 perf_hpp_list__register_sort_field(list, &hse->hpp);
1906 return 0;
1907}
1908
1909static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1910 struct perf_hpp_list *list)
1911{
1912 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1913
1914 if (hse == NULL)
1915 return -1;
1916
1917 perf_hpp_list__column_register(list, &hse->hpp);
1918 return 0;
1919}
1920
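/*
 * Dynamic entry for a single tracepoint field, created on demand by
 * add_dynamic_entry() for --sort keys that name an event field.
 * dynamic_len tracks the widest value seen so the column can be sized.
 */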
1921struct hpp_dynamic_entry {
1922 struct perf_hpp_fmt hpp;
1923 struct perf_evsel *evsel;
1924 struct format_field *field;
1925 unsigned dynamic_len;
1926 bool raw_trace;
1927};
1928
1929static int hde_width(struct hpp_dynamic_entry *hde)
1930{
1931 if (!hde->hpp.len) {
1932 int len = hde->dynamic_len;
1933 int namelen = strlen(hde->field->name);
1934 int fieldlen = hde->field->size;
1935
1936 if (namelen > len)
1937 len = namelen;
1938
1939 if (!(hde->field->flags & FIELD_IS_STRING)) {
1940 /* length needed to print the value as a hex number */
1941 fieldlen = hde->field->size * 2 + 2;
1942 }
1943 if (fieldlen > len)
1944 len = fieldlen;
1945
1946 hde->hpp.len = len;
1947 }
1948 return hde->hpp.len;
1949}
1950
1951static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1952 struct hist_entry *he)
1953{
1954 char *str, *pos;
1955 struct format_field *field = hde->field;
1956 size_t namelen;
1957 bool last = false;
1958
1959 if (hde->raw_trace)
1960 return;
1961
1962 /* parse pretty print result and update max length */
1963 if (!he->trace_output)
1964 he->trace_output = get_trace_output(he);
1965
1966 namelen = strlen(field->name);
1967 str = he->trace_output;
1968
1969 while (str) {
1970 pos = strchr(str, ' ');
1971 if (pos == NULL) {
1972 last = true;
1973 pos = str + strlen(str);
1974 }
1975
1976 if (!strncmp(str, field->name, namelen)) {
1977 size_t len;
1978
1979 str += namelen + 1;
1980 len = pos - str;
1981
1982 if (len > hde->dynamic_len)
1983 hde->dynamic_len = len;
1984 break;
1985 }
1986
1987 if (last)
1988 str = NULL;
1989 else
1990 str = pos + 1;
1991 }
1992}
1993
1994static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1995 struct hists *hists __maybe_unused,
1996 int line __maybe_unused,
1997 int *span __maybe_unused)
1998{
1999 struct hpp_dynamic_entry *hde;
2000 size_t len = fmt->user_len;
2001
2002 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2003
2004 if (!len)
2005 len = hde_width(hde);
2006
2007 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2008}
2009
2010static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2011 struct perf_hpp *hpp __maybe_unused,
2012 struct hists *hists __maybe_unused)
2013{
2014 struct hpp_dynamic_entry *hde;
2015 size_t len = fmt->user_len;
2016
2017 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2018
2019 if (!len)
2020 len = hde_width(hde);
2021
2022 return len;
2023}
2024
2025bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2026{
2027 struct hpp_dynamic_entry *hde;
2028
2029 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2030
2031 return hists_to_evsel(hists) == hde->evsel;
2032}
2033
2034static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2035 struct hist_entry *he)
2036{
2037 struct hpp_dynamic_entry *hde;
2038 size_t len = fmt->user_len;
2039 char *str, *pos;
2040 struct format_field *field;
2041 size_t namelen;
2042 bool last = false;
2043 int ret;
2044
2045 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2046
2047 if (!len)
2048 len = hde_width(hde);
2049
2050 if (hde->raw_trace)
2051 goto raw_field;
2052
2053 if (!he->trace_output)
2054 he->trace_output = get_trace_output(he);
2055
2056 field = hde->field;
2057 namelen = strlen(field->name);
2058 str = he->trace_output;
2059
2060 while (str) {
2061 pos = strchr(str, ' ');
2062 if (pos == NULL) {
2063 last = true;
2064 pos = str + strlen(str);
2065 }
2066
2067 if (!strncmp(str, field->name, namelen)) {
2068 str += namelen + 1;
2069 str = strndup(str, pos - str);
2070
2071 if (str == NULL)
2072 return scnprintf(hpp->buf, hpp->size,
2073 "%*.*s", len, len, "ERROR");
2074 break;
2075 }
2076
2077 if (last)
2078 str = NULL;
2079 else
2080 str = pos + 1;
2081 }
2082
2083 if (str == NULL) {
2084 struct trace_seq seq;
2085raw_field:
2086 trace_seq_init(&seq);
2087 pevent_print_field(&seq, he->raw_data, hde->field);
2088 str = seq.buffer;
2089 }
2090
2091 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2092 free(str);
2093 return ret;
2094}
2095
2096static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2097 struct hist_entry *a, struct hist_entry *b)
2098{
2099 struct hpp_dynamic_entry *hde;
2100 struct format_field *field;
2101 unsigned offset, size;
2102
2103 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2104
2105 if (b == NULL) {
2106 update_dynamic_len(hde, a);
2107 return 0;
2108 }
2109
2110 field = hde->field;
2111 if (field->flags & FIELD_IS_DYNAMIC) {
2112 unsigned long long dyn;
2113
2114 pevent_read_number_field(field, a->raw_data, &dyn);
2115 offset = dyn & 0xffff;
2116 size = (dyn >> 16) & 0xffff;
2117
2118 /* record max width for output */
2119 if (size > hde->dynamic_len)
2120 hde->dynamic_len = size;
2121 } else {
2122 offset = field->offset;
2123 size = field->size;
2124 }
2125
2126 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2127}
2128
2129bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2130{
2131 return fmt->cmp == __sort__hde_cmp;
2132}
2133
2134static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2135{
2136 struct hpp_dynamic_entry *hde_a;
2137 struct hpp_dynamic_entry *hde_b;
2138
2139 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2140 return false;
2141
2142 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2143 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2144
2145 return hde_a->field == hde_b->field;
2146}
2147
2148static void hde_free(struct perf_hpp_fmt *fmt)
2149{
2150 struct hpp_dynamic_entry *hde;
2151
2152 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2153 free(hde);
2154}
2155
2156static struct hpp_dynamic_entry *
2157__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
2158 int level)
2159{
2160 struct hpp_dynamic_entry *hde;
2161
2162 hde = malloc(sizeof(*hde));
2163 if (hde == NULL) {
2164 pr_debug("Memory allocation failed\n");
2165 return NULL;
2166 }
2167
2168 hde->evsel = evsel;
2169 hde->field = field;
2170 hde->dynamic_len = 0;
2171
2172 hde->hpp.name = field->name;
2173 hde->hpp.header = __sort__hde_header;
2174 hde->hpp.width = __sort__hde_width;
2175 hde->hpp.entry = __sort__hde_entry;
2176 hde->hpp.color = NULL;
2177
2178 hde->hpp.cmp = __sort__hde_cmp;
2179 hde->hpp.collapse = __sort__hde_cmp;
2180 hde->hpp.sort = __sort__hde_cmp;
2181 hde->hpp.equal = __sort__hde_equal;
2182 hde->hpp.free = hde_free;
2183
2184 INIT_LIST_HEAD(&hde->hpp.list);
2185 INIT_LIST_HEAD(&hde->hpp.sort_list);
2186 hde->hpp.elide = false;
2187 hde->hpp.len = 0;
2188 hde->hpp.user_len = 0;
2189 hde->hpp.level = level;
2190
2191 return hde;
2192}
2193
2194struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2195{
2196 struct perf_hpp_fmt *new_fmt = NULL;
2197
2198 if (perf_hpp__is_sort_entry(fmt)) {
2199 struct hpp_sort_entry *hse, *new_hse;
2200
2201 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2202 new_hse = memdup(hse, sizeof(*hse));
2203 if (new_hse)
2204 new_fmt = &new_hse->hpp;
2205 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2206 struct hpp_dynamic_entry *hde, *new_hde;
2207
2208 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2209 new_hde = memdup(hde, sizeof(*hde));
2210 if (new_hde)
2211 new_fmt = &new_hde->hpp;
2212 } else {
2213 new_fmt = memdup(fmt, sizeof(*fmt));
2214 }
2215
 if (new_fmt == NULL)
 return NULL;

2216 INIT_LIST_HEAD(&new_fmt->list);
2217 INIT_LIST_HEAD(&new_fmt->sort_list);
2218
2219 return new_fmt;
2220}
2221
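/*
 * Split a --sort token of the form [event.]field[/option] into its event,
 * field and option parts; the '.' and '/' separators are overwritten with
 * NUL bytes in place.
 */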
2222static int parse_field_name(char *str, char **event, char **field, char **opt)
2223{
2224 char *event_name, *field_name, *opt_name;
2225
2226 event_name = str;
2227 field_name = strchr(str, '.');
2228
2229 if (field_name) {
2230 *field_name++ = '\0';
2231 } else {
2232 event_name = NULL;
2233 field_name = str;
2234 }
2235
2236 opt_name = strchr(field_name, '/');
2237 if (opt_name)
2238 *opt_name++ = '\0';
2239
2240 *event = event_name;
2241 *field = field_name;
2242 *opt = opt_name;
2243
2244 return 0;
2245}
2246
2247/* find a matching evsel using a given event name. The event name can be:
2248 * 1. '%' + event index (e.g. '%1' for first event)
2249 * 2. full event name (e.g. sched:sched_switch)
2250 * 3. partial event name (should not contain ':')
2251 */
2252static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2253{
2254 struct perf_evsel *evsel = NULL;
2255 struct perf_evsel *pos;
2256 bool full_name;
2257
2258 /* case 1 */
2259 if (event_name[0] == '%') {
2260 int nr = strtol(event_name+1, NULL, 0);
2261
2262 if (nr < 1 || nr > evlist->nr_entries)
2263 return NULL;
2264
2265 evsel = perf_evlist__first(evlist);
2266 while (--nr > 0)
2267 evsel = perf_evsel__next(evsel);
2268
2269 return evsel;
2270 }
2271
2272 full_name = !!strchr(event_name, ':');
2273 evlist__for_each_entry(evlist, pos) {
2274 /* case 2 */
2275 if (full_name && !strcmp(pos->name, event_name))
2276 return pos;
2277 /* case 3 */
2278 if (!full_name && strstr(pos->name, event_name)) {
2279 if (evsel) {
2280 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2281 event_name, evsel->name, pos->name);
2282 return NULL;
2283 }
2284 evsel = pos;
2285 }
2286 }
2287
2288 return evsel;
2289}
2290
2291static int __dynamic_dimension__add(struct perf_evsel *evsel,
2292 struct format_field *field,
2293 bool raw_trace, int level)
2294{
2295 struct hpp_dynamic_entry *hde;
2296
2297 hde = __alloc_dynamic_entry(evsel, field, level);
2298 if (hde == NULL)
2299 return -ENOMEM;
2300
2301 hde->raw_trace = raw_trace;
2302
2303 perf_hpp__register_sort_field(&hde->hpp);
2304 return 0;
2305}
2306
2307static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2308{
2309 int ret;
2310 struct format_field *field;
2311
2312 field = evsel->tp_format->format.fields;
2313 while (field) {
2314 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2315 if (ret < 0)
2316 return ret;
2317
2318 field = field->next;
2319 }
2320 return 0;
2321}
2322
2323static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2324 int level)
2325{
2326 int ret;
2327 struct perf_evsel *evsel;
2328
2329 evlist__for_each_entry(evlist, evsel) {
2330 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2331 continue;
2332
2333 ret = add_evsel_fields(evsel, raw_trace, level);
2334 if (ret < 0)
2335 return ret;
2336 }
2337 return 0;
2338}
2339
2340static int add_all_matching_fields(struct perf_evlist *evlist,
2341 char *field_name, bool raw_trace, int level)
2342{
2343 int ret = -ESRCH;
2344 struct perf_evsel *evsel;
2345 struct format_field *field;
2346
2347 evlist__for_each_entry(evlist, evsel) {
2348 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2349 continue;
2350
2351 field = pevent_find_any_field(evsel->tp_format, field_name);
2352 if (field == NULL)
2353 continue;
2354
2355 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2356 if (ret < 0)
2357 break;
2358 }
2359 return ret;
2360}
2361
2362static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2363 int level)
2364{
2365 char *str, *event_name, *field_name, *opt_name;
2366 struct perf_evsel *evsel;
2367 struct format_field *field;
2368 bool raw_trace = symbol_conf.raw_trace;
2369 int ret = 0;
2370
2371 if (evlist == NULL)
2372 return -ENOENT;
2373
2374 str = strdup(tok);
2375 if (str == NULL)
2376 return -ENOMEM;
2377
2378 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2379 ret = -EINVAL;
2380 goto out;
2381 }
2382
2383 if (opt_name) {
2384 if (strcmp(opt_name, "raw")) {
2385 pr_debug("unsupported field option %s\n", opt_name);
2386 ret = -EINVAL;
2387 goto out;
2388 }
2389 raw_trace = true;
2390 }
2391
2392 if (!strcmp(field_name, "trace_fields")) {
2393 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2394 goto out;
2395 }
2396
2397 if (event_name == NULL) {
2398 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2399 goto out;
2400 }
2401
2402 evsel = find_evsel(evlist, event_name);
2403 if (evsel == NULL) {
2404 pr_debug("Cannot find event: %s\n", event_name);
2405 ret = -ENOENT;
2406 goto out;
2407 }
2408
2409 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2410 pr_debug("%s is not a tracepoint event\n", event_name);
2411 ret = -EINVAL;
2412 goto out;
2413 }
2414
2415 if (!strcmp(field_name, "*")) {
2416 ret = add_evsel_fields(evsel, raw_trace, level);
2417 } else {
2418 field = pevent_find_any_field(evsel->tp_format, field_name);
2419 if (field == NULL) {
2420 pr_debug("Cannot find event field for %s.%s\n",
2421 event_name, field_name);
2422 ret = -ENOENT;
 goto out;
2423 }
2424
2425 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2426 }
2427
2428out:
2429 free(str);
2430 return ret;
2431}
2432
2433static int __sort_dimension__add(struct sort_dimension *sd,
2434 struct perf_hpp_list *list,
2435 int level)
2436{
2437 if (sd->taken)
2438 return 0;
2439
2440 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2441 return -1;
2442
2443 if (sd->entry->se_collapse)
2444 list->need_collapse = 1;
2445
2446 sd->taken = 1;
2447
2448 return 0;
2449}
2450
2451static int __hpp_dimension__add(struct hpp_dimension *hd,
2452 struct perf_hpp_list *list,
2453 int level)
2454{
2455 struct perf_hpp_fmt *fmt;
2456
2457 if (hd->taken)
2458 return 0;
2459
2460 fmt = __hpp_dimension__alloc_hpp(hd, level);
2461 if (!fmt)
2462 return -1;
2463
2464 hd->taken = 1;
2465 perf_hpp_list__register_sort_field(list, fmt);
2466 return 0;
2467}
2468
2469static int __sort_dimension__add_output(struct perf_hpp_list *list,
2470 struct sort_dimension *sd)
2471{
2472 if (sd->taken)
2473 return 0;
2474
2475 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2476 return -1;
2477
2478 sd->taken = 1;
2479 return 0;
2480}
2481
2482static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2483 struct hpp_dimension *hd)
2484{
2485 struct perf_hpp_fmt *fmt;
2486
2487 if (hd->taken)
2488 return 0;
2489
2490 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2491 if (!fmt)
2492 return -1;
2493
2494 hd->taken = 1;
2495 perf_hpp_list__column_register(list, fmt);
2496 return 0;
2497}
2498
2499int hpp_dimension__add_output(unsigned col)
2500{
2501 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2502 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2503}
2504
2505int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2506 struct perf_evlist *evlist,
2507 int level)
2508{
2509 unsigned int i;
2510
2511 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2512 struct sort_dimension *sd = &common_sort_dimensions[i];
2513
2514 if (strncasecmp(tok, sd->name, strlen(tok)))
2515 continue;
2516
2517 if (sd->entry == &sort_parent) {
2518 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2519 if (ret) {
2520 char err[BUFSIZ];
2521
2522 regerror(ret, &parent_regex, err, sizeof(err));
2523 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2524 return -EINVAL;
2525 }
2526 list->parent = 1;
2527 } else if (sd->entry == &sort_sym) {
2528 list->sym = 1;
2529 /*
2530 * perf diff displays the performance difference between
2531 * two or more perf.data files. Those files could come
2532 * from different binaries, so we should not compare
2533 * their IPs but the symbol names.
2534 */
2535 if (sort__mode == SORT_MODE__DIFF)
2536 sd->entry->se_collapse = sort__sym_sort;
2537
2538 } else if (sd->entry == &sort_dso) {
2539 list->dso = 1;
2540 } else if (sd->entry == &sort_socket) {
2541 list->socket = 1;
2542 } else if (sd->entry == &sort_thread) {
2543 list->thread = 1;
2544 } else if (sd->entry == &sort_comm) {
2545 list->comm = 1;
2546 }
2547
2548 return __sort_dimension__add(sd, list, level);
2549 }
2550
2551 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2552 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2553
2554 if (strncasecmp(tok, hd->name, strlen(tok)))
2555 continue;
2556
2557 return __hpp_dimension__add(hd, list, level);
2558 }
2559
2560 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2561 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2562
2563 if (strncasecmp(tok, sd->name, strlen(tok)))
2564 continue;
2565
2566 if (sort__mode != SORT_MODE__BRANCH)
2567 return -EINVAL;
2568
2569 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2570 list->sym = 1;
2571
2572 __sort_dimension__add(sd, list, level);
2573 return 0;
2574 }
2575
2576 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2577 struct sort_dimension *sd = &memory_sort_dimensions[i];
2578
2579 if (strncasecmp(tok, sd->name, strlen(tok)))
2580 continue;
2581
2582 if (sort__mode != SORT_MODE__MEMORY)
2583 return -EINVAL;
2584
2585 if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
2586 return -EINVAL;
2587
2588 if (sd->entry == &sort_mem_daddr_sym)
2589 list->sym = 1;
2590
2591 __sort_dimension__add(sd, list, level);
2592 return 0;
2593 }
2594
2595 if (!add_dynamic_entry(evlist, tok, level))
2596 return 0;
2597
2598 return -ESRCH;
2599}
2600
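/*
 * Parse the comma/space separated sort order string. Keys grouped inside
 * '{}' share the same hierarchy level; otherwise each key starts a new
 * level.
 */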
2601static int setup_sort_list(struct perf_hpp_list *list, char *str,
2602 struct perf_evlist *evlist)
2603{
2604 char *tmp, *tok;
2605 int ret = 0;
2606 int level = 0;
2607 int next_level = 1;
2608 bool in_group = false;
2609
2610 do {
2611 tok = str;
2612 tmp = strpbrk(str, "{}, ");
2613 if (tmp) {
2614 if (in_group)
2615 next_level = level;
2616 else
2617 next_level = level + 1;
2618
2619 if (*tmp == '{')
2620 in_group = true;
2621 else if (*tmp == '}')
2622 in_group = false;
2623
2624 *tmp = '\0';
2625 str = tmp + 1;
2626 }
2627
2628 if (*tok) {
2629 ret = sort_dimension__add(list, tok, evlist, level);
2630 if (ret == -EINVAL) {
2631 if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
2632 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2633 else
2634 pr_err("Invalid --sort key: `%s'", tok);
2635 break;
2636 } else if (ret == -ESRCH) {
2637 pr_err("Unknown --sort key: `%s'", tok);
2638 break;
2639 }
2640 }
2641
2642 level = next_level;
2643 } while (tmp);
2644
2645 return ret;
2646}
2647
2648static const char *get_default_sort_order(struct perf_evlist *evlist)
2649{
2650 const char *default_sort_orders[] = {
2651 default_sort_order,
2652 default_branch_sort_order,
2653 default_mem_sort_order,
2654 default_top_sort_order,
2655 default_diff_sort_order,
2656 default_tracepoint_sort_order,
2657 };
2658 bool use_trace = true;
2659 struct perf_evsel *evsel;
2660
2661 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2662
2663 if (evlist == NULL || perf_evlist__empty(evlist))
2664 goto out_no_evlist;
2665
2666 evlist__for_each_entry(evlist, evsel) {
2667 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2668 use_trace = false;
2669 break;
2670 }
2671 }
2672
2673 if (use_trace) {
2674 sort__mode = SORT_MODE__TRACEPOINT;
2675 if (symbol_conf.raw_trace)
2676 return "trace_fields";
2677 }
2678out_no_evlist:
2679 return default_sort_orders[sort__mode];
2680}
2681
2682static int setup_sort_order(struct perf_evlist *evlist)
2683{
2684 char *new_sort_order;
2685
2686 /*
2687 * Append '+'-prefixed sort order to the default sort
2688 * order string.
2689 */
2690 if (!sort_order || is_strict_order(sort_order))
2691 return 0;
2692
2693 if (sort_order[1] == '\0') {
2694 pr_err("Invalid --sort key: `+'");
2695 return -EINVAL;
2696 }
2697
2698 /*
2699 * We allocate a new sort_order string but never free it,
2700 * because it is referenced throughout the rest of the code.
2701 */
2702 if (asprintf(&new_sort_order, "%s,%s",
2703 get_default_sort_order(evlist), sort_order + 1) < 0) {
2704 pr_err("Not enough memory to set up --sort");
2705 return -ENOMEM;
2706 }
2707
2708 sort_order = new_sort_order;
2709 return 0;
2710}
2711
2712/*
2713 * Adds a 'pre,' prefix to 'str' if 'pre' is
2714 * not already part of 'str'.
2715 */
2716static char *prefix_if_not_in(const char *pre, char *str)
2717{
2718 char *n;
2719
2720 if (!str || strstr(str, pre))
2721 return str;
2722
2723 if (asprintf(&n, "%s,%s", pre, str) < 0)
2724 return NULL;
2725
2726 free(str);
2727 return n;
2728}
2729
2730static char *setup_overhead(char *keys)
2731{
2732 if (sort__mode == SORT_MODE__DIFF)
2733 return keys;
2734
2735 keys = prefix_if_not_in("overhead", keys);
2736
2737 if (symbol_conf.cumulate_callchain)
2738 keys = prefix_if_not_in("overhead_children", keys);
2739
2740 return keys;
2741}
2742
2743static int __setup_sorting(struct perf_evlist *evlist)
2744{
2745 char *str;
2746 const char *sort_keys;
2747 int ret = 0;
2748
2749 ret = setup_sort_order(evlist);
2750 if (ret)
2751 return ret;
2752
2753 sort_keys = sort_order;
2754 if (sort_keys == NULL) {
2755 if (is_strict_order(field_order)) {
2756 /*
2757 * If the user specified a field order but no sort order,
2758 * we'll honor it and not add default sort orders.
2759 */
2760 return 0;
2761 }
2762
2763 sort_keys = get_default_sort_order(evlist);
2764 }
2765
2766 str = strdup(sort_keys);
2767 if (str == NULL) {
2768 pr_err("Not enough memory to setup sort keys");
2769 return -ENOMEM;
2770 }
2771
2772 /*
2773 * Prepend overhead fields for backward compatibility.
2774 */
2775 if (!is_strict_order(field_order)) {
2776 str = setup_overhead(str);
2777 if (str == NULL) {
2778 pr_err("Not enough memory to setup overhead keys");
2779 return -ENOMEM;
2780 }
2781 }
2782
2783 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2784
2785 free(str);
2786 return ret;
2787}
2788
2789void perf_hpp__set_elide(int idx, bool elide)
2790{
2791 struct perf_hpp_fmt *fmt;
2792 struct hpp_sort_entry *hse;
2793
2794 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2795 if (!perf_hpp__is_sort_entry(fmt))
2796 continue;
2797
2798 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2799 if (hse->se->se_width_idx == idx) {
2800 fmt->elide = elide;
2801 break;
2802 }
2803 }
2804}
2805
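/*
 * A column is elided when its filter list holds exactly one entry: the
 * single value is printed once as a "# name: value" header line instead
 * of being repeated on every row.
 */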
2806static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2807{
2808 if (list && strlist__nr_entries(list) == 1) {
2809 if (fp != NULL)
2810 fprintf(fp, "# %s: %s\n", list_name,
2811 strlist__entry(list, 0)->s);
2812 return true;
2813 }
2814 return false;
2815}
2816
2817static bool get_elide(int idx, FILE *output)
2818{
2819 switch (idx) {
2820 case HISTC_SYMBOL:
2821 return __get_elide(symbol_conf.sym_list, "symbol", output);
2822 case HISTC_DSO:
2823 return __get_elide(symbol_conf.dso_list, "dso", output);
2824 case HISTC_COMM:
2825 return __get_elide(symbol_conf.comm_list, "comm", output);
2826 default:
2827 break;
2828 }
2829
2830 if (sort__mode != SORT_MODE__BRANCH)
2831 return false;
2832
2833 switch (idx) {
2834 case HISTC_SYMBOL_FROM:
2835 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2836 case HISTC_SYMBOL_TO:
2837 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2838 case HISTC_DSO_FROM:
2839 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2840 case HISTC_DSO_TO:
2841 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2842 default:
2843 break;
2844 }
2845
2846 return false;
2847}
2848
2849void sort__setup_elide(FILE *output)
2850{
2851 struct perf_hpp_fmt *fmt;
2852 struct hpp_sort_entry *hse;
2853
2854 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2855 if (!perf_hpp__is_sort_entry(fmt))
2856 continue;
2857
2858 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2859 fmt->elide = get_elide(hse->se->se_width_idx, output);
2860 }
2861
2862 /*
2863 * It makes no sense to elide all of the sort entries.
2864 * Just revert them so they show up again.
2865 */
2866 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2867 if (!perf_hpp__is_sort_entry(fmt))
2868 continue;
2869
2870 if (!fmt->elide)
2871 return;
2872 }
2873
2874 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2875 if (!perf_hpp__is_sort_entry(fmt))
2876 continue;
2877
2878 fmt->elide = false;
2879 }
2880}
2881
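/*
 * Resolve a --fields key against all dimension tables and register it as
 * an output column only; no sort key is added.
 */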
2882int output_field_add(struct perf_hpp_list *list, char *tok)
2883{
2884 unsigned int i;
2885
2886 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2887 struct sort_dimension *sd = &common_sort_dimensions[i];
2888
2889 if (strncasecmp(tok, sd->name, strlen(tok)))
2890 continue;
2891
2892 return __sort_dimension__add_output(list, sd);
2893 }
2894
2895 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2896 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2897
2898 if (strncasecmp(tok, hd->name, strlen(tok)))
2899 continue;
2900
2901 return __hpp_dimension__add_output(list, hd);
2902 }
2903
2904 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2905 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2906
2907 if (strncasecmp(tok, sd->name, strlen(tok)))
2908 continue;
2909
2910 return __sort_dimension__add_output(list, sd);
2911 }
2912
2913 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2914 struct sort_dimension *sd = &memory_sort_dimensions[i];
2915
2916 if (strncasecmp(tok, sd->name, strlen(tok)))
2917 continue;
2918
2919 return __sort_dimension__add_output(list, sd);
2920 }
2921
2922 return -ESRCH;
2923}
2924
2925static int setup_output_list(struct perf_hpp_list *list, char *str)
2926{
2927 char *tmp, *tok;
2928 int ret = 0;
2929
2930 for (tok = strtok_r(str, ", ", &tmp);
2931 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2932 ret = output_field_add(list, tok);
2933 if (ret == -EINVAL) {
2934 ui__error("Invalid --fields key: `%s'", tok);
2935 break;
2936 } else if (ret == -ESRCH) {
2937 ui__error("Unknown --fields key: `%s'", tok);
2938 break;
2939 }
2940 }
2941
2942 return ret;
2943}
2944
2945void reset_dimensions(void)
2946{
2947 unsigned int i;
2948
2949 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2950 common_sort_dimensions[i].taken = 0;
2951
2952 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2953 hpp_sort_dimensions[i].taken = 0;
2954
2955 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2956 bstack_sort_dimensions[i].taken = 0;
2957
2958 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2959 memory_sort_dimensions[i].taken = 0;
2960}
2961
2962bool is_strict_order(const char *order)
2963{
2964 return order && (*order != '+');
2965}
2966
2967static int __setup_output_field(void)
2968{
2969 char *str, *strp;
2970 int ret = -EINVAL;
2971
2972 if (field_order == NULL)
2973 return 0;
2974
2975 strp = str = strdup(field_order);
2976 if (str == NULL) {
2977 pr_err("Not enough memory to setup output fields");
2978 return -ENOMEM;
2979 }
2980
2981 if (!is_strict_order(field_order))
2982 strp++;
2983
2984 if (!strlen(strp)) {
2985 pr_err("Invalid --fields key: `+'");
2986 goto out;
2987 }
2988
2989 ret = setup_output_list(&perf_hpp_list, strp);
2990
2991out:
2992 free(str);
2993 return ret;
2994}
2995
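/*
 * Top-level setup: parse the sort order and output fields, then link the
 * resulting sort keys and output columns into perf_hpp_list.
 */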
2996int setup_sorting(struct perf_evlist *evlist)
2997{
2998 int err;
2999
3000 err = __setup_sorting(evlist);
3001 if (err < 0)
3002 return err;
3003
3004 if (parent_pattern != default_parent_pattern) {
3005 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3006 if (err < 0)
3007 return err;
3008 }
3009
3010 reset_dimensions();
3011
3012 /*
3013 * perf diff doesn't use default hpp output fields.
3014 */
3015 if (sort__mode != SORT_MODE__DIFF)
3016 perf_hpp__init();
3017
3018 err = __setup_output_field();
3019 if (err < 0)
3020 return err;
3021
3022 /* copy sort keys to output fields */
3023 perf_hpp__setup_output_field(&perf_hpp_list);
3024 /* and then copy output fields to sort keys */
3025 perf_hpp__append_sort_keys(&perf_hpp_list);
3026
3027 /* setup hists-specific output fields */
3028 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3029 return -1;
3030
3031 return 0;
3032}
3033
3034void reset_output_field(void)
3035{
3036 perf_hpp_list.need_collapse = 0;
3037 perf_hpp_list.parent = 0;
3038 perf_hpp_list.sym = 0;
3039 perf_hpp_list.dso = 0;
3040
3041 field_order = NULL;
3042 sort_order = NULL;
3043
3044 reset_dimensions();
3045 perf_hpp__reset_output_field(&perf_hpp_list);
3046}