Loading...
1/*
2 * builtin-stat.c
3 *
4 * Builtin stat command: Give a precise performance counters summary
5 * overview about any workload, CPU or specific PID.
6 *
7 * Sample output:
8
9 $ perf stat ./hackbench 10
10
11 Time: 0.118
12
13 Performance counter stats for './hackbench 10':
14
15 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
26
27 0.154822978 seconds time elapsed
28
29 *
30 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
31 *
32 * Improvements and fixes by:
33 *
34 * Arjan van de Ven <arjan@linux.intel.com>
35 * Yanmin Zhang <yanmin.zhang@intel.com>
36 * Wu Fengguang <fengguang.wu@intel.com>
37 * Mike Galbraith <efault@gmx.de>
38 * Paul Mackerras <paulus@samba.org>
39 * Jaswinder Singh Rajput <jaswinder@kernel.org>
40 *
41 * Released under the GPL v2. (and only v2, not any later version)
42 */
43
44#include "perf.h"
45#include "builtin.h"
46#include "util/util.h"
47#include "util/parse-options.h"
48#include "util/parse-events.h"
49#include "util/event.h"
50#include "util/evlist.h"
51#include "util/evsel.h"
52#include "util/debug.h"
53#include "util/color.h"
54#include "util/header.h"
55#include "util/cpumap.h"
56#include "util/thread.h"
57#include "util/thread_map.h"
58
59#include <sys/prctl.h>
60#include <math.h>
61#include <locale.h>
62
63#define DEFAULT_SEPARATOR " "
64#define CNTR_NOT_SUPPORTED "<not supported>"
65#define CNTR_NOT_COUNTED "<not counted>"
66
67static struct perf_event_attr default_attrs[] = {
68
69 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
70 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
71 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
72 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
73
74 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
75 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
76 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
77 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
78 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
79 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
80
81};
82
83/*
84 * Detailed stats (-d), covering the L1 and last level data caches:
85 */
86static struct perf_event_attr detailed_attrs[] = {
87
88 { .type = PERF_TYPE_HW_CACHE,
89 .config =
90 PERF_COUNT_HW_CACHE_L1D << 0 |
91 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
92 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
93
94 { .type = PERF_TYPE_HW_CACHE,
95 .config =
96 PERF_COUNT_HW_CACHE_L1D << 0 |
97 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
98 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
99
100 { .type = PERF_TYPE_HW_CACHE,
101 .config =
102 PERF_COUNT_HW_CACHE_LL << 0 |
103 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
104 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
105
106 { .type = PERF_TYPE_HW_CACHE,
107 .config =
108 PERF_COUNT_HW_CACHE_LL << 0 |
109 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
110 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
111};
112
113/*
114 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
115 */
116static struct perf_event_attr very_detailed_attrs[] = {
117
118 { .type = PERF_TYPE_HW_CACHE,
119 .config =
120 PERF_COUNT_HW_CACHE_L1I << 0 |
121 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
122 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
123
124 { .type = PERF_TYPE_HW_CACHE,
125 .config =
126 PERF_COUNT_HW_CACHE_L1I << 0 |
127 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
128 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
129
130 { .type = PERF_TYPE_HW_CACHE,
131 .config =
132 PERF_COUNT_HW_CACHE_DTLB << 0 |
133 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
134 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
135
136 { .type = PERF_TYPE_HW_CACHE,
137 .config =
138 PERF_COUNT_HW_CACHE_DTLB << 0 |
139 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
140 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
141
142 { .type = PERF_TYPE_HW_CACHE,
143 .config =
144 PERF_COUNT_HW_CACHE_ITLB << 0 |
145 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
146 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
147
148 { .type = PERF_TYPE_HW_CACHE,
149 .config =
150 PERF_COUNT_HW_CACHE_ITLB << 0 |
151 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
152 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
153
154};
155
156/*
157 * Very, very detailed stats (-d -d -d), adding prefetch events:
158 */
159static struct perf_event_attr very_very_detailed_attrs[] = {
160
161 { .type = PERF_TYPE_HW_CACHE,
162 .config =
163 PERF_COUNT_HW_CACHE_L1D << 0 |
164 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
165 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
166
167 { .type = PERF_TYPE_HW_CACHE,
168 .config =
169 PERF_COUNT_HW_CACHE_L1D << 0 |
170 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
171 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
172};
173
174
175
176static struct perf_evlist *evsel_list;
177
178static struct perf_target target = {
179 .uid = UINT_MAX,
180};
181
182static int run_idx = 0;
183static int run_count = 1;
184static bool no_inherit = false;
185static bool scale = true;
186static bool no_aggr = false;
187static pid_t child_pid = -1;
188static bool null_run = false;
189static int detailed_run = 0;
190static bool sync_run = false;
191static bool big_num = true;
192static int big_num_opt = -1;
193static const char *csv_sep = NULL;
194static bool csv_output = false;
195static bool group = false;
196static const char *output_name = NULL;
197static FILE *output = NULL;
198static int output_fd;
199
200static volatile int done = 0;
201
202struct stats
203{
204 double n, mean, M2;
205};
206
207struct perf_stat {
208 struct stats res_stats[3];
209};
210
211static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
212{
213 evsel->priv = zalloc(sizeof(struct perf_stat));
214 return evsel->priv == NULL ? -ENOMEM : 0;
215}
216
217static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
218{
219 free(evsel->priv);
220 evsel->priv = NULL;
221}
222
223static void update_stats(struct stats *stats, u64 val)
224{
225 double delta;
226
227 stats->n++;
228 delta = val - stats->mean;
229 stats->mean += delta / stats->n;
230 stats->M2 += delta*(val - stats->mean);
231}
232
233static double avg_stats(struct stats *stats)
234{
235 return stats->mean;
236}
237
238/*
239 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
240 *
241 * (\Sum n_i^2) - ((\Sum n_i)^2)/n
242 * s^2 = -------------------------------
243 * n - 1
244 *
245 * http://en.wikipedia.org/wiki/Stddev
246 *
247 * The std dev of the mean is related to the std dev by:
248 *
249 * s
250 * s_mean = -------
251 * sqrt(n)
252 *
253 */
254static double stddev_stats(struct stats *stats)
255{
256 double variance, variance_mean;
257
258 if (!stats->n)
259 return 0.0;
260
261 variance = stats->M2 / (stats->n - 1);
262 variance_mean = variance / stats->n;
263
264 return sqrt(variance_mean);
265}
266
267static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
268static struct stats runtime_cycles_stats[MAX_NR_CPUS];
269static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
270static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
271static struct stats runtime_branches_stats[MAX_NR_CPUS];
272static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
273static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
274static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
275static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
276static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
277static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
278static struct stats walltime_nsecs_stats;
279
280static int create_perf_stat_counter(struct perf_evsel *evsel,
281 struct perf_evsel *first)
282{
283 struct perf_event_attr *attr = &evsel->attr;
284 struct xyarray *group_fd = NULL;
285 bool exclude_guest_missing = false;
286 int ret;
287
288 if (group && evsel != first)
289 group_fd = first->fd;
290
291 if (scale)
292 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
293 PERF_FORMAT_TOTAL_TIME_RUNNING;
294
295 attr->inherit = !no_inherit;
296
297retry:
298 if (exclude_guest_missing)
299 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
300
301 if (perf_target__has_cpu(&target)) {
302 ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
303 group, group_fd);
304 if (ret)
305 goto check_ret;
306 return 0;
307 }
308
309 if (!perf_target__has_task(&target) && (!group || evsel == first)) {
310 attr->disabled = 1;
311 attr->enable_on_exec = 1;
312 }
313
314 ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
315 group, group_fd);
316 if (!ret)
317 return 0;
318 /* fall through */
319check_ret:
320 if (ret && errno == EINVAL) {
321 if (!exclude_guest_missing &&
322 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
323 pr_debug("Old kernel, cannot exclude "
324 "guest or host samples.\n");
325 exclude_guest_missing = true;
326 goto retry;
327 }
328 }
329 return ret;
330}
331
332/*
333 * Does the counter have nsecs as a unit?
334 */
335static inline int nsec_counter(struct perf_evsel *evsel)
336{
337 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
338 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
339 return 1;
340
341 return 0;
342}
343
344/*
345 * Update various tracking values we maintain to print
346 * more semantic information such as miss/hit ratios,
347 * instruction rates, etc:
348 */
349static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
350{
351 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
352 update_stats(&runtime_nsecs_stats[0], count[0]);
353 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
354 update_stats(&runtime_cycles_stats[0], count[0]);
355 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
356 update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
357 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
358 update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
359 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
360 update_stats(&runtime_branches_stats[0], count[0]);
361 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
362 update_stats(&runtime_cacherefs_stats[0], count[0]);
363 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
364 update_stats(&runtime_l1_dcache_stats[0], count[0]);
365 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
366 update_stats(&runtime_l1_icache_stats[0], count[0]);
367 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
368 update_stats(&runtime_ll_cache_stats[0], count[0]);
369 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
370 update_stats(&runtime_dtlb_cache_stats[0], count[0]);
371 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
372 update_stats(&runtime_itlb_cache_stats[0], count[0]);
373}
374
375/*
376 * Read out the results of a single counter:
377 * aggregate counts across CPUs in system-wide mode
378 */
379static int read_counter_aggr(struct perf_evsel *counter)
380{
381 struct perf_stat *ps = counter->priv;
382 u64 *count = counter->counts->aggr.values;
383 int i;
384
385 if (__perf_evsel__read(counter, evsel_list->cpus->nr,
386 evsel_list->threads->nr, scale) < 0)
387 return -1;
388
389 for (i = 0; i < 3; i++)
390 update_stats(&ps->res_stats[i], count[i]);
391
392 if (verbose) {
393 fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
394 event_name(counter), count[0], count[1], count[2]);
395 }
396
397 /*
398 * Save the full runtime - to allow normalization during printout:
399 */
400 update_shadow_stats(counter, count);
401
402 return 0;
403}
404
405/*
406 * Read out the results of a single counter:
407 * do not aggregate counts across CPUs in system-wide mode
408 */
409static int read_counter(struct perf_evsel *counter)
410{
411 u64 *count;
412 int cpu;
413
414 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
415 if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
416 return -1;
417
418 count = counter->counts->cpu[cpu].values;
419
420 update_shadow_stats(counter, count);
421 }
422
423 return 0;
424}
425
426static int run_perf_stat(int argc __used, const char **argv)
427{
428 unsigned long long t0, t1;
429 struct perf_evsel *counter, *first;
430 int status = 0;
431 int child_ready_pipe[2], go_pipe[2];
432 const bool forks = (argc > 0);
433 char buf;
434
435 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
436 perror("failed to create pipes");
437 exit(1);
438 }
439
440 if (forks) {
441 if ((child_pid = fork()) < 0)
442 perror("failed to fork");
443
444 if (!child_pid) {
445 close(child_ready_pipe[0]);
446 close(go_pipe[1]);
447 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
448
449 /*
450 * Do a dummy execvp to get the PLT entry resolved,
451 * so we avoid the resolver overhead on the real
452 * execvp call.
453 */
454 execvp("", (char **)argv);
455
456 /*
457 * Tell the parent we're ready to go
458 */
459 close(child_ready_pipe[1]);
460
461 /*
462 * Wait until the parent tells us to go.
463 */
464 if (read(go_pipe[0], &buf, 1) == -1)
465 perror("unable to read pipe");
466
467 execvp(argv[0], (char **)argv);
468
469 perror(argv[0]);
470 exit(-1);
471 }
472
473 if (perf_target__none(&target))
474 evsel_list->threads->map[0] = child_pid;
475
476 /*
477 * Wait for the child to be ready to exec.
478 */
479 close(child_ready_pipe[1]);
480 close(go_pipe[0]);
481 if (read(child_ready_pipe[0], &buf, 1) == -1)
482 perror("unable to read pipe");
483 close(child_ready_pipe[0]);
484 }
485
486 first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
487
488 list_for_each_entry(counter, &evsel_list->entries, node) {
489 if (create_perf_stat_counter(counter, first) < 0) {
490 /*
491 * PPC returns ENXIO for HW counters until 2.6.37
492 * (behavior changed with commit b0a873e).
493 */
494 if (errno == EINVAL || errno == ENOSYS ||
495 errno == ENOENT || errno == EOPNOTSUPP ||
496 errno == ENXIO) {
497 if (verbose)
498 ui__warning("%s event is not supported by the kernel.\n",
499 event_name(counter));
500 counter->supported = false;
501 continue;
502 }
503
504 if (errno == EPERM || errno == EACCES) {
505 error("You may not have permission to collect %sstats.\n"
506 "\t Consider tweaking"
507 " /proc/sys/kernel/perf_event_paranoid or running as root.",
508 target.system_wide ? "system-wide " : "");
509 } else {
510 error("open_counter returned with %d (%s). "
511 "/bin/dmesg may provide additional information.\n",
512 errno, strerror(errno));
513 }
514 if (child_pid != -1)
515 kill(child_pid, SIGTERM);
516 die("Not all events could be opened.\n");
517 return -1;
518 }
519 counter->supported = true;
520 }
521
522 if (perf_evlist__set_filters(evsel_list)) {
523 error("failed to set filter with %d (%s)\n", errno,
524 strerror(errno));
525 return -1;
526 }
527
528 /*
529 * Enable counters and exec the command:
530 */
531 t0 = rdclock();
532
533 if (forks) {
534 close(go_pipe[1]);
535 wait(&status);
536 if (WIFSIGNALED(status))
537 psignal(WTERMSIG(status), argv[0]);
538 } else {
539 while(!done) sleep(1);
540 }
541
542 t1 = rdclock();
543
544 update_stats(&walltime_nsecs_stats, t1 - t0);
545
546 if (no_aggr) {
547 list_for_each_entry(counter, &evsel_list->entries, node) {
548 read_counter(counter);
549 perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
550 }
551 } else {
552 list_for_each_entry(counter, &evsel_list->entries, node) {
553 read_counter_aggr(counter);
554 perf_evsel__close_fd(counter, evsel_list->cpus->nr,
555 evsel_list->threads->nr);
556 }
557 }
558
559 return WEXITSTATUS(status);
560}
561
562static void print_noise_pct(double total, double avg)
563{
564 double pct = 0.0;
565
566 if (avg)
567 pct = 100.0*total/avg;
568
569 if (csv_output)
570 fprintf(output, "%s%.2f%%", csv_sep, pct);
571 else if (pct)
572 fprintf(output, " ( +-%6.2f%% )", pct);
573}
574
575static void print_noise(struct perf_evsel *evsel, double avg)
576{
577 struct perf_stat *ps;
578
579 if (run_count == 1)
580 return;
581
582 ps = evsel->priv;
583 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
584}
585
586static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
587{
588 double msecs = avg / 1e6;
589 char cpustr[16] = { '\0', };
590 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
591
592 if (no_aggr)
593 sprintf(cpustr, "CPU%*d%s",
594 csv_output ? 0 : -4,
595 evsel_list->cpus->map[cpu], csv_sep);
596
597 fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel));
598
599 if (evsel->cgrp)
600 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
601
602 if (csv_output)
603 return;
604
605 if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
606 fprintf(output, " # %8.3f CPUs utilized ",
607 avg / avg_stats(&walltime_nsecs_stats));
608 else
609 fprintf(output, " ");
610}
611
612/* used for get_ratio_color() */
613enum grc_type {
614 GRC_STALLED_CYCLES_FE,
615 GRC_STALLED_CYCLES_BE,
616 GRC_CACHE_MISSES,
617 GRC_MAX_NR
618};
619
620static const char *get_ratio_color(enum grc_type type, double ratio)
621{
622 static const double grc_table[GRC_MAX_NR][3] = {
623 [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
624 [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
625 [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 },
626 };
627 const char *color = PERF_COLOR_NORMAL;
628
629 if (ratio > grc_table[type][0])
630 color = PERF_COLOR_RED;
631 else if (ratio > grc_table[type][1])
632 color = PERF_COLOR_MAGENTA;
633 else if (ratio > grc_table[type][2])
634 color = PERF_COLOR_YELLOW;
635
636 return color;
637}
638
639static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
640{
641 double total, ratio = 0.0;
642 const char *color;
643
644 total = avg_stats(&runtime_cycles_stats[cpu]);
645
646 if (total)
647 ratio = avg / total * 100.0;
648
649 color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
650
651 fprintf(output, " # ");
652 color_fprintf(output, color, "%6.2f%%", ratio);
653 fprintf(output, " frontend cycles idle ");
654}
655
656static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
657{
658 double total, ratio = 0.0;
659 const char *color;
660
661 total = avg_stats(&runtime_cycles_stats[cpu]);
662
663 if (total)
664 ratio = avg / total * 100.0;
665
666 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
667
668 fprintf(output, " # ");
669 color_fprintf(output, color, "%6.2f%%", ratio);
670 fprintf(output, " backend cycles idle ");
671}
672
673static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
674{
675 double total, ratio = 0.0;
676 const char *color;
677
678 total = avg_stats(&runtime_branches_stats[cpu]);
679
680 if (total)
681 ratio = avg / total * 100.0;
682
683 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
684
685 fprintf(output, " # ");
686 color_fprintf(output, color, "%6.2f%%", ratio);
687 fprintf(output, " of all branches ");
688}
689
690static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
691{
692 double total, ratio = 0.0;
693 const char *color;
694
695 total = avg_stats(&runtime_l1_dcache_stats[cpu]);
696
697 if (total)
698 ratio = avg / total * 100.0;
699
700 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
701
702 fprintf(output, " # ");
703 color_fprintf(output, color, "%6.2f%%", ratio);
704 fprintf(output, " of all L1-dcache hits ");
705}
706
707static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
708{
709 double total, ratio = 0.0;
710 const char *color;
711
712 total = avg_stats(&runtime_l1_icache_stats[cpu]);
713
714 if (total)
715 ratio = avg / total * 100.0;
716
717 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
718
719 fprintf(output, " # ");
720 color_fprintf(output, color, "%6.2f%%", ratio);
721 fprintf(output, " of all L1-icache hits ");
722}
723
724static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
725{
726 double total, ratio = 0.0;
727 const char *color;
728
729 total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
730
731 if (total)
732 ratio = avg / total * 100.0;
733
734 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
735
736 fprintf(output, " # ");
737 color_fprintf(output, color, "%6.2f%%", ratio);
738 fprintf(output, " of all dTLB cache hits ");
739}
740
741static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
742{
743 double total, ratio = 0.0;
744 const char *color;
745
746 total = avg_stats(&runtime_itlb_cache_stats[cpu]);
747
748 if (total)
749 ratio = avg / total * 100.0;
750
751 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
752
753 fprintf(output, " # ");
754 color_fprintf(output, color, "%6.2f%%", ratio);
755 fprintf(output, " of all iTLB cache hits ");
756}
757
758static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
759{
760 double total, ratio = 0.0;
761 const char *color;
762
763 total = avg_stats(&runtime_ll_cache_stats[cpu]);
764
765 if (total)
766 ratio = avg / total * 100.0;
767
768 color = get_ratio_color(GRC_CACHE_MISSES, ratio);
769
770 fprintf(output, " # ");
771 color_fprintf(output, color, "%6.2f%%", ratio);
772 fprintf(output, " of all LL-cache hits ");
773}
774
775static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
776{
777 double total, ratio = 0.0;
778 char cpustr[16] = { '\0', };
779 const char *fmt;
780
781 if (csv_output)
782 fmt = "%s%.0f%s%s";
783 else if (big_num)
784 fmt = "%s%'18.0f%s%-25s";
785 else
786 fmt = "%s%18.0f%s%-25s";
787
788 if (no_aggr)
789 sprintf(cpustr, "CPU%*d%s",
790 csv_output ? 0 : -4,
791 evsel_list->cpus->map[cpu], csv_sep);
792 else
793 cpu = 0;
794
795 fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel));
796
797 if (evsel->cgrp)
798 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
799
800 if (csv_output)
801 return;
802
803 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
804 total = avg_stats(&runtime_cycles_stats[cpu]);
805
806 if (total)
807 ratio = avg / total;
808
809 fprintf(output, " # %5.2f insns per cycle ", ratio);
810
811 total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
812 total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
813
814 if (total && avg) {
815 ratio = total / avg;
816 fprintf(output, "\n # %5.2f stalled cycles per insn", ratio);
817 }
818
819 } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
820 runtime_branches_stats[cpu].n != 0) {
821 print_branch_misses(cpu, evsel, avg);
822 } else if (
823 evsel->attr.type == PERF_TYPE_HW_CACHE &&
824 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
825 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
826 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
827 runtime_l1_dcache_stats[cpu].n != 0) {
828 print_l1_dcache_misses(cpu, evsel, avg);
829 } else if (
830 evsel->attr.type == PERF_TYPE_HW_CACHE &&
831 evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
832 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
833 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
834 runtime_l1_icache_stats[cpu].n != 0) {
835 print_l1_icache_misses(cpu, evsel, avg);
836 } else if (
837 evsel->attr.type == PERF_TYPE_HW_CACHE &&
838 evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
839 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
840 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
841 runtime_dtlb_cache_stats[cpu].n != 0) {
842 print_dtlb_cache_misses(cpu, evsel, avg);
843 } else if (
844 evsel->attr.type == PERF_TYPE_HW_CACHE &&
845 evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
846 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
847 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
848 runtime_itlb_cache_stats[cpu].n != 0) {
849 print_itlb_cache_misses(cpu, evsel, avg);
850 } else if (
851 evsel->attr.type == PERF_TYPE_HW_CACHE &&
852 evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
853 ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
854 ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
855 runtime_ll_cache_stats[cpu].n != 0) {
856 print_ll_cache_misses(cpu, evsel, avg);
857 } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
858 runtime_cacherefs_stats[cpu].n != 0) {
859 total = avg_stats(&runtime_cacherefs_stats[cpu]);
860
861 if (total)
862 ratio = avg * 100 / total;
863
864 fprintf(output, " # %8.3f %% of all cache refs ", ratio);
865
866 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
867 print_stalled_cycles_frontend(cpu, evsel, avg);
868 } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
869 print_stalled_cycles_backend(cpu, evsel, avg);
870 } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
871 total = avg_stats(&runtime_nsecs_stats[cpu]);
872
873 if (total)
874 ratio = 1.0 * avg / total;
875
876 fprintf(output, " # %8.3f GHz ", ratio);
877 } else if (runtime_nsecs_stats[cpu].n != 0) {
878 char unit = 'M';
879
880 total = avg_stats(&runtime_nsecs_stats[cpu]);
881
882 if (total)
883 ratio = 1000.0 * avg / total;
884 if (ratio < 0.001) {
885 ratio *= 1000;
886 unit = 'K';
887 }
888
889 fprintf(output, " # %8.3f %c/sec ", ratio, unit);
890 } else {
891 fprintf(output, " ");
892 }
893}
894
895/*
896 * Print out the results of a single counter:
897 * aggregated counts in system-wide mode
898 */
899static void print_counter_aggr(struct perf_evsel *counter)
900{
901 struct perf_stat *ps = counter->priv;
902 double avg = avg_stats(&ps->res_stats[0]);
903 int scaled = counter->counts->scaled;
904
905 if (scaled == -1) {
906 fprintf(output, "%*s%s%*s",
907 csv_output ? 0 : 18,
908 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
909 csv_sep,
910 csv_output ? 0 : -24,
911 event_name(counter));
912
913 if (counter->cgrp)
914 fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
915
916 fputc('\n', output);
917 return;
918 }
919
920 if (nsec_counter(counter))
921 nsec_printout(-1, counter, avg);
922 else
923 abs_printout(-1, counter, avg);
924
925 print_noise(counter, avg);
926
927 if (csv_output) {
928 fputc('\n', output);
929 return;
930 }
931
932 if (scaled) {
933 double avg_enabled, avg_running;
934
935 avg_enabled = avg_stats(&ps->res_stats[1]);
936 avg_running = avg_stats(&ps->res_stats[2]);
937
938 fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
939 }
940 fprintf(output, "\n");
941}
942
943/*
944 * Print out the results of a single counter:
945 * does not use aggregated count in system-wide
946 */
947static void print_counter(struct perf_evsel *counter)
948{
949 u64 ena, run, val;
950 int cpu;
951
952 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
953 val = counter->counts->cpu[cpu].val;
954 ena = counter->counts->cpu[cpu].ena;
955 run = counter->counts->cpu[cpu].run;
956 if (run == 0 || ena == 0) {
957 fprintf(output, "CPU%*d%s%*s%s%*s",
958 csv_output ? 0 : -4,
959 evsel_list->cpus->map[cpu], csv_sep,
960 csv_output ? 0 : 18,
961 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
962 csv_sep,
963 csv_output ? 0 : -24,
964 event_name(counter));
965
966 if (counter->cgrp)
967 fprintf(output, "%s%s",
968 csv_sep, counter->cgrp->name);
969
970 fputc('\n', output);
971 continue;
972 }
973
974 if (nsec_counter(counter))
975 nsec_printout(cpu, counter, val);
976 else
977 abs_printout(cpu, counter, val);
978
979 if (!csv_output) {
980 print_noise(counter, 1.0);
981
982 if (run != ena)
983 fprintf(output, " (%.2f%%)",
984 100.0 * run / ena);
985 }
986 fputc('\n', output);
987 }
988}
989
990static void print_stat(int argc, const char **argv)
991{
992 struct perf_evsel *counter;
993 int i;
994
995 fflush(stdout);
996
997 if (!csv_output) {
998 fprintf(output, "\n");
999 fprintf(output, " Performance counter stats for ");
1000 if (!perf_target__has_task(&target)) {
1001 fprintf(output, "\'%s", argv[0]);
1002 for (i = 1; i < argc; i++)
1003 fprintf(output, " %s", argv[i]);
1004 } else if (target.pid)
1005 fprintf(output, "process id \'%s", target.pid);
1006 else
1007 fprintf(output, "thread id \'%s", target.tid);
1008
1009 fprintf(output, "\'");
1010 if (run_count > 1)
1011 fprintf(output, " (%d runs)", run_count);
1012 fprintf(output, ":\n\n");
1013 }
1014
1015 if (no_aggr) {
1016 list_for_each_entry(counter, &evsel_list->entries, node)
1017 print_counter(counter);
1018 } else {
1019 list_for_each_entry(counter, &evsel_list->entries, node)
1020 print_counter_aggr(counter);
1021 }
1022
1023 if (!csv_output) {
1024 if (!null_run)
1025 fprintf(output, "\n");
1026 fprintf(output, " %17.9f seconds time elapsed",
1027 avg_stats(&walltime_nsecs_stats)/1e9);
1028 if (run_count > 1) {
1029 fprintf(output, " ");
1030 print_noise_pct(stddev_stats(&walltime_nsecs_stats),
1031 avg_stats(&walltime_nsecs_stats));
1032 }
1033 fprintf(output, "\n\n");
1034 }
1035}
1036
1037static volatile int signr = -1;
1038
1039static void skip_signal(int signo)
1040{
1041 if(child_pid == -1)
1042 done = 1;
1043
1044 signr = signo;
1045}
1046
1047static void sig_atexit(void)
1048{
1049 if (child_pid != -1)
1050 kill(child_pid, SIGTERM);
1051
1052 if (signr == -1)
1053 return;
1054
1055 signal(signr, SIG_DFL);
1056 kill(getpid(), signr);
1057}
1058
1059static const char * const stat_usage[] = {
1060 "perf stat [<options>] [<command>]",
1061 NULL
1062};
1063
1064static int stat__set_big_num(const struct option *opt __used,
1065 const char *s __used, int unset)
1066{
1067 big_num_opt = unset ? 0 : 1;
1068 return 0;
1069}
1070
1071static bool append_file;
1072
1073static const struct option options[] = {
1074 OPT_CALLBACK('e', "event", &evsel_list, "event",
1075 "event selector. use 'perf list' to list available events",
1076 parse_events_option),
1077 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1078 "event filter", parse_filter),
1079 OPT_BOOLEAN('i', "no-inherit", &no_inherit,
1080 "child tasks do not inherit counters"),
1081 OPT_STRING('p', "pid", &target.pid, "pid",
1082 "stat events on existing process id"),
1083 OPT_STRING('t', "tid", &target.tid, "tid",
1084 "stat events on existing thread id"),
1085 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1086 "system-wide collection from all CPUs"),
1087 OPT_BOOLEAN('g', "group", &group,
1088 "put the counters into a counter group"),
1089 OPT_BOOLEAN('c', "scale", &scale,
1090 "scale/normalize counters"),
1091 OPT_INCR('v', "verbose", &verbose,
1092 "be more verbose (show counter open errors, etc)"),
1093 OPT_INTEGER('r', "repeat", &run_count,
1094 "repeat command and print average + stddev (max: 100)"),
1095 OPT_BOOLEAN('n', "null", &null_run,
1096 "null run - dont start any counters"),
1097 OPT_INCR('d', "detailed", &detailed_run,
1098 "detailed run - start a lot of events"),
1099 OPT_BOOLEAN('S', "sync", &sync_run,
1100 "call sync() before starting a run"),
1101 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1102 "print large numbers with thousands\' separators",
1103 stat__set_big_num),
1104 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1105 "list of cpus to monitor in system-wide"),
1106 OPT_BOOLEAN('A', "no-aggr", &no_aggr,
1107 "disable CPU count aggregation"),
1108 OPT_STRING('x', "field-separator", &csv_sep, "separator",
1109 "print counts with custom separator"),
1110 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1111 "monitor event in cgroup name only",
1112 parse_cgroups),
1113 OPT_STRING('o', "output", &output_name, "file",
1114 "output file name"),
1115 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
1116 OPT_INTEGER(0, "log-fd", &output_fd,
1117 "log output to fd, instead of stderr"),
1118 OPT_END()
1119};
1120
1121/*
1122 * Add default attributes, if there were no attributes specified or
1123 * if -d/--detailed, -d -d or -d -d -d is used:
1124 */
1125static int add_default_attributes(void)
1126{
1127 /* Set attrs if no event is selected and !null_run: */
1128 if (null_run)
1129 return 0;
1130
1131 if (!evsel_list->nr_entries) {
1132 if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
1133 return -1;
1134 }
1135
1136 /* Detailed events get appended to the event list: */
1137
1138 if (detailed_run < 1)
1139 return 0;
1140
1141 /* Append detailed run extra attributes: */
1142 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1143 return -1;
1144
1145 if (detailed_run < 2)
1146 return 0;
1147
1148 /* Append very detailed run extra attributes: */
1149 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1150 return -1;
1151
1152 if (detailed_run < 3)
1153 return 0;
1154
1155 /* Append very, very detailed run extra attributes: */
1156 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1157}
1158
1159int cmd_stat(int argc, const char **argv, const char *prefix __used)
1160{
1161 struct perf_evsel *pos;
1162 int status = -ENOMEM;
1163 const char *mode;
1164
1165 setlocale(LC_ALL, "");
1166
1167 evsel_list = perf_evlist__new(NULL, NULL);
1168 if (evsel_list == NULL)
1169 return -ENOMEM;
1170
1171 argc = parse_options(argc, argv, options, stat_usage,
1172 PARSE_OPT_STOP_AT_NON_OPTION);
1173
1174 output = stderr;
1175 if (output_name && strcmp(output_name, "-"))
1176 output = NULL;
1177
1178 if (output_name && output_fd) {
1179 fprintf(stderr, "cannot use both --output and --log-fd\n");
1180 usage_with_options(stat_usage, options);
1181 }
1182
1183 if (output_fd < 0) {
1184 fprintf(stderr, "argument to --log-fd must be a > 0\n");
1185 usage_with_options(stat_usage, options);
1186 }
1187
1188 if (!output) {
1189 struct timespec tm;
1190 mode = append_file ? "a" : "w";
1191
1192 output = fopen(output_name, mode);
1193 if (!output) {
1194 perror("failed to create output file");
1195 exit(-1);
1196 }
1197 clock_gettime(CLOCK_REALTIME, &tm);
1198 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
1199 } else if (output_fd > 0) {
1200 mode = append_file ? "a" : "w";
1201 output = fdopen(output_fd, mode);
1202 if (!output) {
1203 perror("Failed opening logfd");
1204 return -errno;
1205 }
1206 }
1207
1208 if (csv_sep) {
1209 csv_output = true;
1210 if (!strcmp(csv_sep, "\\t"))
1211 csv_sep = "\t";
1212 } else
1213 csv_sep = DEFAULT_SEPARATOR;
1214
1215 /*
1216 * let the spreadsheet do the pretty-printing
1217 */
1218 if (csv_output) {
1219 /* User explicitly passed -B? */
1220 if (big_num_opt == 1) {
1221 fprintf(stderr, "-B option not supported with -x\n");
1222 usage_with_options(stat_usage, options);
1223 } else /* Nope, so disable big number formatting */
1224 big_num = false;
1225 } else if (big_num_opt == 0) /* User passed --no-big-num */
1226 big_num = false;
1227
1228 if (!argc && !perf_target__has_task(&target))
1229 usage_with_options(stat_usage, options);
1230 if (run_count <= 0)
1231 usage_with_options(stat_usage, options);
1232
1233 /* no_aggr, cgroup are for system-wide only */
1234 if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
1235 fprintf(stderr, "both cgroup and no-aggregation "
1236 "modes only available in system-wide mode\n");
1237
1238 usage_with_options(stat_usage, options);
1239 }
1240
1241 if (add_default_attributes())
1242 goto out;
1243
1244 perf_target__validate(&target);
1245
1246 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
1247 if (perf_target__has_task(&target))
1248 pr_err("Problems finding threads of monitor\n");
1249 if (perf_target__has_cpu(&target))
1250 perror("failed to parse CPUs map");
1251
1252 usage_with_options(stat_usage, options);
1253 return -1;
1254 }
1255
1256 list_for_each_entry(pos, &evsel_list->entries, node) {
1257 if (perf_evsel__alloc_stat_priv(pos) < 0 ||
1258 perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
1259 goto out_free_fd;
1260 }
1261
1262 /*
1263 * We dont want to block the signals - that would cause
1264 * child tasks to inherit that and Ctrl-C would not work.
1265 * What we want is for Ctrl-C to work in the exec()-ed
1266 * task, but being ignored by perf stat itself:
1267 */
1268 atexit(sig_atexit);
1269 signal(SIGINT, skip_signal);
1270 signal(SIGALRM, skip_signal);
1271 signal(SIGABRT, skip_signal);
1272
1273 status = 0;
1274 for (run_idx = 0; run_idx < run_count; run_idx++) {
1275 if (run_count != 1 && verbose)
1276 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
1277 run_idx + 1);
1278
1279 if (sync_run)
1280 sync();
1281
1282 status = run_perf_stat(argc, argv);
1283 }
1284
1285 if (status != -1)
1286 print_stat(argc, argv);
1287out_free_fd:
1288 list_for_each_entry(pos, &evsel_list->entries, node)
1289 perf_evsel__free_stat_priv(pos);
1290 perf_evlist__delete_maps(evsel_list);
1291out:
1292 perf_evlist__delete(evsel_list);
1293 return status;
1294}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * builtin-stat.c
4 *
5 * Builtin stat command: Give a precise performance counters summary
6 * overview about any workload, CPU or specific PID.
7 *
8 * Sample output:
9
10 $ perf stat ./hackbench 10
11
12 Time: 0.118
13
14 Performance counter stats for './hackbench 10':
15
16 1708.761321 task-clock # 11.037 CPUs utilized
17 41,190 context-switches # 0.024 M/sec
18 6,735 CPU-migrations # 0.004 M/sec
19 17,318 page-faults # 0.010 M/sec
20 5,205,202,243 cycles # 3.046 GHz
21 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
22 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
23 2,603,501,247 instructions # 0.50 insns per cycle
24 # 1.48 stalled cycles per insn
25 484,357,498 branches # 283.455 M/sec
26 6,388,934 branch-misses # 1.32% of all branches
27
28 0.154822978 seconds time elapsed
29
30 *
31 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
32 *
33 * Improvements and fixes by:
34 *
35 * Arjan van de Ven <arjan@linux.intel.com>
36 * Yanmin Zhang <yanmin.zhang@intel.com>
37 * Wu Fengguang <fengguang.wu@intel.com>
38 * Mike Galbraith <efault@gmx.de>
39 * Paul Mackerras <paulus@samba.org>
40 * Jaswinder Singh Rajput <jaswinder@kernel.org>
41 */
42
43#include "builtin.h"
44#include "perf.h"
45#include "util/cgroup.h"
46#include <subcmd/parse-options.h>
47#include "util/parse-events.h"
48#include "util/pmu.h"
49#include "util/event.h"
50#include "util/evlist.h"
51#include "util/evsel.h"
52#include "util/debug.h"
53#include "util/color.h"
54#include "util/stat.h"
55#include "util/header.h"
56#include "util/cpumap.h"
57#include "util/thread_map.h"
58#include "util/counts.h"
59#include "util/group.h"
60#include "util/session.h"
61#include "util/tool.h"
62#include "util/string2.h"
63#include "util/metricgroup.h"
64#include "util/synthetic-events.h"
65#include "util/target.h"
66#include "util/time-utils.h"
67#include "util/top.h"
68#include "util/affinity.h"
69#include "util/pfm.h"
70#include "asm/bug.h"
71
72#include <linux/time64.h>
73#include <linux/zalloc.h>
74#include <api/fs/fs.h>
75#include <errno.h>
76#include <signal.h>
77#include <stdlib.h>
78#include <sys/prctl.h>
79#include <inttypes.h>
80#include <locale.h>
81#include <math.h>
82#include <sys/types.h>
83#include <sys/stat.h>
84#include <sys/wait.h>
85#include <unistd.h>
86#include <sys/time.h>
87#include <sys/resource.h>
88#include <linux/err.h>
89
90#include <linux/ctype.h>
91#include <perf/evlist.h>
92
93#define DEFAULT_SEPARATOR " "
94#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
95
96static void print_counters(struct timespec *ts, int argc, const char **argv);
97
98/* Default events used for perf stat -T */
99static const char *transaction_attrs = {
100 "task-clock,"
101 "{"
102 "instructions,"
103 "cycles,"
104 "cpu/cycles-t/,"
105 "cpu/tx-start/,"
106 "cpu/el-start/,"
107 "cpu/cycles-ct/"
108 "}"
109};
110
111/* More limited version when the CPU does not have all events. */
112static const char * transaction_limited_attrs = {
113 "task-clock,"
114 "{"
115 "instructions,"
116 "cycles,"
117 "cpu/cycles-t/,"
118 "cpu/tx-start/"
119 "}"
120};
121
122static const char * topdown_attrs[] = {
123 "topdown-total-slots",
124 "topdown-slots-retired",
125 "topdown-recovery-bubbles",
126 "topdown-fetch-bubbles",
127 "topdown-slots-issued",
128 NULL,
129};
130
131static const char *smi_cost_attrs = {
132 "{"
133 "msr/aperf/,"
134 "msr/smi/,"
135 "cycles"
136 "}"
137};
138
139static struct evlist *evsel_list;
140
141static struct target target = {
142 .uid = UINT_MAX,
143};
144
145#define METRIC_ONLY_LEN 20
146
147static volatile pid_t child_pid = -1;
148static int detailed_run = 0;
149static bool transaction_run;
150static bool topdown_run = false;
151static bool smi_cost = false;
152static bool smi_reset = false;
153static int big_num_opt = -1;
154static bool group = false;
155static const char *pre_cmd = NULL;
156static const char *post_cmd = NULL;
157static bool sync_run = false;
158static bool forever = false;
159static bool force_metric_only = false;
160static struct timespec ref_time;
161static bool append_file;
162static bool interval_count;
163static const char *output_name;
164static int output_fd;
165
166struct perf_stat {
167 bool record;
168 struct perf_data data;
169 struct perf_session *session;
170 u64 bytes_written;
171 struct perf_tool tool;
172 bool maps_allocated;
173 struct perf_cpu_map *cpus;
174 struct perf_thread_map *threads;
175 enum aggr_mode aggr_mode;
176};
177
178static struct perf_stat perf_stat;
179#define STAT_RECORD perf_stat.record
180
181static volatile int done = 0;
182
183static struct perf_stat_config stat_config = {
184 .aggr_mode = AGGR_GLOBAL,
185 .scale = true,
186 .unit_width = 4, /* strlen("unit") */
187 .run_count = 1,
188 .metric_only_len = METRIC_ONLY_LEN,
189 .walltime_nsecs_stats = &walltime_nsecs_stats,
190 .big_num = true,
191 .ctl_fd = -1,
192 .ctl_fd_ack = -1
193};
194
195static bool cpus_map_matched(struct evsel *a, struct evsel *b)
196{
197 if (!a->core.cpus && !b->core.cpus)
198 return true;
199
200 if (!a->core.cpus || !b->core.cpus)
201 return false;
202
203 if (a->core.cpus->nr != b->core.cpus->nr)
204 return false;
205
206 for (int i = 0; i < a->core.cpus->nr; i++) {
207 if (a->core.cpus->map[i] != b->core.cpus->map[i])
208 return false;
209 }
210
211 return true;
212}
213
214static void evlist__check_cpu_maps(struct evlist *evlist)
215{
216 struct evsel *evsel, *pos, *leader;
217 char buf[1024];
218
219 evlist__for_each_entry(evlist, evsel) {
220 leader = evsel->leader;
221
222 /* Check that leader matches cpus with each member. */
223 if (leader == evsel)
224 continue;
225 if (cpus_map_matched(leader, evsel))
226 continue;
227
228 /* If there's mismatch disable the group and warn user. */
229 WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n");
230 evsel__group_desc(leader, buf, sizeof(buf));
231 pr_warning(" %s\n", buf);
232
233 if (verbose) {
234 cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
235 pr_warning(" %s: %s\n", leader->name, buf);
236 cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
237 pr_warning(" %s: %s\n", evsel->name, buf);
238 }
239
240 for_each_group_evsel(pos, leader) {
241 pos->leader = pos;
242 pos->core.nr_members = 0;
243 }
244 evsel->leader->core.nr_members = 0;
245 }
246}
247
248static inline void diff_timespec(struct timespec *r, struct timespec *a,
249 struct timespec *b)
250{
251 r->tv_sec = a->tv_sec - b->tv_sec;
252 if (a->tv_nsec < b->tv_nsec) {
253 r->tv_nsec = a->tv_nsec + NSEC_PER_SEC - b->tv_nsec;
254 r->tv_sec--;
255 } else {
256 r->tv_nsec = a->tv_nsec - b->tv_nsec ;
257 }
258}
259
260static void perf_stat__reset_stats(void)
261{
262 int i;
263
264 perf_evlist__reset_stats(evsel_list);
265 perf_stat__reset_shadow_stats();
266
267 for (i = 0; i < stat_config.stats_num; i++)
268 perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
269}
270
271static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
272 union perf_event *event,
273 struct perf_sample *sample __maybe_unused,
274 struct machine *machine __maybe_unused)
275{
276 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
277 pr_err("failed to write perf data, error: %m\n");
278 return -1;
279 }
280
281 perf_stat.bytes_written += event->header.size;
282 return 0;
283}
284
285static int write_stat_round_event(u64 tm, u64 type)
286{
287 return perf_event__synthesize_stat_round(NULL, tm, type,
288 process_synthesized_event,
289 NULL);
290}
291
292#define WRITE_STAT_ROUND_EVENT(time, interval) \
293 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
294
295#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
296
297static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
298 struct perf_counts_values *count)
299{
300 struct perf_sample_id *sid = SID(counter, cpu, thread);
301
302 return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
303 process_synthesized_event, NULL);
304}
305
306static int read_single_counter(struct evsel *counter, int cpu,
307 int thread, struct timespec *rs)
308{
309 if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
310 u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
311 struct perf_counts_values *count =
312 perf_counts(counter->counts, cpu, thread);
313 count->ena = count->run = val;
314 count->val = val;
315 return 0;
316 }
317 return evsel__read_counter(counter, cpu, thread);
318}
319
320/*
321 * Read out the results of a single counter:
322 * do not aggregate counts across CPUs in system-wide mode
323 */
324static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
325{
326 int nthreads = perf_thread_map__nr(evsel_list->core.threads);
327 int thread;
328
329 if (!counter->supported)
330 return -ENOENT;
331
332 if (counter->core.system_wide)
333 nthreads = 1;
334
335 for (thread = 0; thread < nthreads; thread++) {
336 struct perf_counts_values *count;
337
338 count = perf_counts(counter->counts, cpu, thread);
339
340 /*
341 * The leader's group read loads data into its group members
342 * (via evsel__read_counter()) and sets their count->loaded.
343 */
344 if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
345 read_single_counter(counter, cpu, thread, rs)) {
346 counter->counts->scaled = -1;
347 perf_counts(counter->counts, cpu, thread)->ena = 0;
348 perf_counts(counter->counts, cpu, thread)->run = 0;
349 return -1;
350 }
351
352 perf_counts__set_loaded(counter->counts, cpu, thread, false);
353
354 if (STAT_RECORD) {
355 if (evsel__write_stat_event(counter, cpu, thread, count)) {
356 pr_err("failed to write stat event\n");
357 return -1;
358 }
359 }
360
361 if (verbose > 1) {
362 fprintf(stat_config.output,
363 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
364 evsel__name(counter),
365 cpu,
366 count->val, count->ena, count->run);
367 }
368 }
369
370 return 0;
371}
372
373static int read_affinity_counters(struct timespec *rs)
374{
375 struct evsel *counter;
376 struct affinity affinity;
377 int i, ncpus, cpu;
378
379 if (affinity__setup(&affinity) < 0)
380 return -1;
381
382 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
383 if (!target__has_cpu(&target) || target__has_per_thread(&target))
384 ncpus = 1;
385 evlist__for_each_cpu(evsel_list, i, cpu) {
386 if (i >= ncpus)
387 break;
388 affinity__set(&affinity, cpu);
389
390 evlist__for_each_entry(evsel_list, counter) {
391 if (evsel__cpu_iter_skip(counter, cpu))
392 continue;
393 if (!counter->err) {
394 counter->err = read_counter_cpu(counter, rs,
395 counter->cpu_iter - 1);
396 }
397 }
398 }
399 affinity__cleanup(&affinity);
400 return 0;
401}
402
403static void read_counters(struct timespec *rs)
404{
405 struct evsel *counter;
406
407 if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0))
408 return;
409
410 evlist__for_each_entry(evsel_list, counter) {
411 if (counter->err)
412 pr_debug("failed to read counter %s\n", counter->name);
413 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter))
414 pr_warning("failed to process counter %s\n", counter->name);
415 counter->err = 0;
416 }
417}
418
419static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
420{
421 int i;
422
423 config->stats = calloc(nthreads, sizeof(struct runtime_stat));
424 if (!config->stats)
425 return -1;
426
427 config->stats_num = nthreads;
428
429 for (i = 0; i < nthreads; i++)
430 runtime_stat__init(&config->stats[i]);
431
432 return 0;
433}
434
435static void runtime_stat_delete(struct perf_stat_config *config)
436{
437 int i;
438
439 if (!config->stats)
440 return;
441
442 for (i = 0; i < config->stats_num; i++)
443 runtime_stat__exit(&config->stats[i]);
444
445 zfree(&config->stats);
446}
447
448static void runtime_stat_reset(struct perf_stat_config *config)
449{
450 int i;
451
452 if (!config->stats)
453 return;
454
455 for (i = 0; i < config->stats_num; i++)
456 perf_stat__reset_shadow_per_stat(&config->stats[i]);
457}
458
459static void process_interval(void)
460{
461 struct timespec ts, rs;
462
463 clock_gettime(CLOCK_MONOTONIC, &ts);
464 diff_timespec(&rs, &ts, &ref_time);
465
466 perf_stat__reset_shadow_per_stat(&rt_stat);
467 runtime_stat_reset(&stat_config);
468 read_counters(&rs);
469
470 if (STAT_RECORD) {
471 if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
472 pr_err("failed to write stat round event\n");
473 }
474
475 init_stats(&walltime_nsecs_stats);
476 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
477 print_counters(&rs, 0, NULL);
478}
479
480static bool handle_interval(unsigned int interval, int *times)
481{
482 if (interval) {
483 process_interval();
484 if (interval_count && !(--(*times)))
485 return true;
486 }
487 return false;
488}
489
490static void enable_counters(void)
491{
492 if (stat_config.initial_delay < 0) {
493 pr_info(EVLIST_DISABLED_MSG);
494 return;
495 }
496
497 if (stat_config.initial_delay > 0) {
498 pr_info(EVLIST_DISABLED_MSG);
499 usleep(stat_config.initial_delay * USEC_PER_MSEC);
500 }
501
502 /*
503 * We need to enable counters only if:
504 * - we don't have tracee (attaching to task or cpu)
505 * - we have initial delay configured
506 */
507 if (!target__none(&target) || stat_config.initial_delay) {
508 evlist__enable(evsel_list);
509 if (stat_config.initial_delay > 0)
510 pr_info(EVLIST_ENABLED_MSG);
511 }
512}
513
514static void disable_counters(void)
515{
516 /*
517 * If we don't have tracee (attaching to task or cpu), counters may
518 * still be running. To get accurate group ratios, we must stop groups
519 * from counting before reading their constituent counters.
520 */
521 if (!target__none(&target))
522 evlist__disable(evsel_list);
523}
524
525static volatile int workload_exec_errno;
526
527/*
528 * perf_evlist__prepare_workload will send a SIGUSR1
529 * if the fork fails, since we asked by setting its
530 * want_signal to true.
531 */
532static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
533 void *ucontext __maybe_unused)
534{
535 workload_exec_errno = info->si_value.sival_int;
536}
537
538static bool evsel__should_store_id(struct evsel *counter)
539{
540 return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
541}
542
543static bool is_target_alive(struct target *_target,
544 struct perf_thread_map *threads)
545{
546 struct stat st;
547 int i;
548
549 if (!target__has_task(_target))
550 return true;
551
552 for (i = 0; i < threads->nr; i++) {
553 char path[PATH_MAX];
554
555 scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
556 threads->map[i].pid);
557
558 if (!stat(path, &st))
559 return true;
560 }
561
562 return false;
563}
564
565static void process_evlist(struct evlist *evlist, unsigned int interval)
566{
567 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
568
569 if (evlist__ctlfd_process(evlist, &cmd) > 0) {
570 switch (cmd) {
571 case EVLIST_CTL_CMD_ENABLE:
572 pr_info(EVLIST_ENABLED_MSG);
573 if (interval)
574 process_interval();
575 break;
576 case EVLIST_CTL_CMD_DISABLE:
577 if (interval)
578 process_interval();
579 pr_info(EVLIST_DISABLED_MSG);
580 break;
581 case EVLIST_CTL_CMD_ACK:
582 case EVLIST_CTL_CMD_UNSUPPORTED:
583 default:
584 break;
585 }
586 }
587}
588
589static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
590 int *time_to_sleep)
591{
592 int tts = *time_to_sleep;
593 struct timespec time_diff;
594
595 diff_timespec(&time_diff, time_stop, time_start);
596
597 tts -= time_diff.tv_sec * MSEC_PER_SEC +
598 time_diff.tv_nsec / NSEC_PER_MSEC;
599
600 if (tts < 0)
601 tts = 0;
602
603 *time_to_sleep = tts;
604}
605
606static int dispatch_events(bool forks, int timeout, int interval, int *times)
607{
608 int child_exited = 0, status = 0;
609 int time_to_sleep, sleep_time;
610 struct timespec time_start, time_stop;
611
612 if (interval)
613 sleep_time = interval;
614 else if (timeout)
615 sleep_time = timeout;
616 else
617 sleep_time = 1000;
618
619 time_to_sleep = sleep_time;
620
621 while (!done) {
622 if (forks)
623 child_exited = waitpid(child_pid, &status, WNOHANG);
624 else
625 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
626
627 if (child_exited)
628 break;
629
630 clock_gettime(CLOCK_MONOTONIC, &time_start);
631 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
632 if (timeout || handle_interval(interval, times))
633 break;
634 time_to_sleep = sleep_time;
635 } else { /* fd revent */
636 process_evlist(evsel_list, interval);
637 clock_gettime(CLOCK_MONOTONIC, &time_stop);
638 compute_tts(&time_start, &time_stop, &time_to_sleep);
639 }
640 }
641
642 return status;
643}
644
645enum counter_recovery {
646 COUNTER_SKIP,
647 COUNTER_RETRY,
648 COUNTER_FATAL,
649};
650
651static enum counter_recovery stat_handle_error(struct evsel *counter)
652{
653 char msg[BUFSIZ];
654 /*
655 * PPC returns ENXIO for HW counters until 2.6.37
656 * (behavior changed with commit b0a873e).
657 */
658 if (errno == EINVAL || errno == ENOSYS ||
659 errno == ENOENT || errno == EOPNOTSUPP ||
660 errno == ENXIO) {
661 if (verbose > 0)
662 ui__warning("%s event is not supported by the kernel.\n",
663 evsel__name(counter));
664 counter->supported = false;
665 /*
666 * errored is a sticky flag that means one of the counter's
667 * cpu event had a problem and needs to be reexamined.
668 */
669 counter->errored = true;
670
671 if ((counter->leader != counter) ||
672 !(counter->leader->core.nr_members > 1))
673 return COUNTER_SKIP;
674 } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
675 if (verbose > 0)
676 ui__warning("%s\n", msg);
677 return COUNTER_RETRY;
678 } else if (target__has_per_thread(&target) &&
679 evsel_list->core.threads &&
680 evsel_list->core.threads->err_thread != -1) {
681 /*
682 * For global --per-thread case, skip current
683 * error thread.
684 */
685 if (!thread_map__remove(evsel_list->core.threads,
686 evsel_list->core.threads->err_thread)) {
687 evsel_list->core.threads->err_thread = -1;
688 return COUNTER_RETRY;
689 }
690 }
691
692 evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
693 ui__error("%s\n", msg);
694
695 if (child_pid != -1)
696 kill(child_pid, SIGTERM);
697 return COUNTER_FATAL;
698}
699
700static int __run_perf_stat(int argc, const char **argv, int run_idx)
701{
702 int interval = stat_config.interval;
703 int times = stat_config.times;
704 int timeout = stat_config.timeout;
705 char msg[BUFSIZ];
706 unsigned long long t0, t1;
707 struct evsel *counter;
708 size_t l;
709 int status = 0;
710 const bool forks = (argc > 0);
711 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
712 struct affinity affinity;
713 int i, cpu;
714 bool second_pass = false;
715
716 if (forks) {
717 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
718 workload_exec_failed_signal) < 0) {
719 perror("failed to prepare workload");
720 return -1;
721 }
722 child_pid = evsel_list->workload.pid;
723 }
724
725 if (group)
726 perf_evlist__set_leader(evsel_list);
727
728 if (affinity__setup(&affinity) < 0)
729 return -1;
730
731 evlist__for_each_cpu (evsel_list, i, cpu) {
732 affinity__set(&affinity, cpu);
733
734 evlist__for_each_entry(evsel_list, counter) {
735 if (evsel__cpu_iter_skip(counter, cpu))
736 continue;
737 if (counter->reset_group || counter->errored)
738 continue;
739try_again:
740 if (create_perf_stat_counter(counter, &stat_config, &target,
741 counter->cpu_iter - 1) < 0) {
742
743 /*
744 * Weak group failed. We cannot just undo this here
745 * because earlier CPUs might be in group mode, and the kernel
746 * doesn't support mixing group and non group reads. Defer
747 * it to later.
748 * Don't close here because we're in the wrong affinity.
749 */
750 if ((errno == EINVAL || errno == EBADF) &&
751 counter->leader != counter &&
752 counter->weak_group) {
753 perf_evlist__reset_weak_group(evsel_list, counter, false);
754 assert(counter->reset_group);
755 second_pass = true;
756 continue;
757 }
758
759 switch (stat_handle_error(counter)) {
760 case COUNTER_FATAL:
761 return -1;
762 case COUNTER_RETRY:
763 goto try_again;
764 case COUNTER_SKIP:
765 continue;
766 default:
767 break;
768 }
769
770 }
771 counter->supported = true;
772 }
773 }
774
775 if (second_pass) {
776 /*
777 * Now redo all the weak group after closing them,
778 * and also close errored counters.
779 */
780
781 evlist__for_each_cpu(evsel_list, i, cpu) {
782 affinity__set(&affinity, cpu);
783 /* First close errored or weak retry */
784 evlist__for_each_entry(evsel_list, counter) {
785 if (!counter->reset_group && !counter->errored)
786 continue;
787 if (evsel__cpu_iter_skip_no_inc(counter, cpu))
788 continue;
789 perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
790 }
791 /* Now reopen weak */
792 evlist__for_each_entry(evsel_list, counter) {
793 if (!counter->reset_group && !counter->errored)
794 continue;
795 if (evsel__cpu_iter_skip(counter, cpu))
796 continue;
797 if (!counter->reset_group)
798 continue;
799try_again_reset:
800 pr_debug2("reopening weak %s\n", evsel__name(counter));
801 if (create_perf_stat_counter(counter, &stat_config, &target,
802 counter->cpu_iter - 1) < 0) {
803
804 switch (stat_handle_error(counter)) {
805 case COUNTER_FATAL:
806 return -1;
807 case COUNTER_RETRY:
808 goto try_again_reset;
809 case COUNTER_SKIP:
810 continue;
811 default:
812 break;
813 }
814 }
815 counter->supported = true;
816 }
817 }
818 }
819 affinity__cleanup(&affinity);
820
821 evlist__for_each_entry(evsel_list, counter) {
822 if (!counter->supported) {
823 perf_evsel__free_fd(&counter->core);
824 continue;
825 }
826
827 l = strlen(counter->unit);
828 if (l > stat_config.unit_width)
829 stat_config.unit_width = l;
830
831 if (evsel__should_store_id(counter) &&
832 evsel__store_ids(counter, evsel_list))
833 return -1;
834 }
835
836 if (perf_evlist__apply_filters(evsel_list, &counter)) {
837 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
838 counter->filter, evsel__name(counter), errno,
839 str_error_r(errno, msg, sizeof(msg)));
840 return -1;
841 }
842
843 if (STAT_RECORD) {
844 int err, fd = perf_data__fd(&perf_stat.data);
845
846 if (is_pipe) {
847 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
848 } else {
849 err = perf_session__write_header(perf_stat.session, evsel_list,
850 fd, false);
851 }
852
853 if (err < 0)
854 return err;
855
856 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
857 process_synthesized_event, is_pipe);
858 if (err < 0)
859 return err;
860 }
861
862 /*
863 * Enable counters and exec the command:
864 */
865 t0 = rdclock();
866 clock_gettime(CLOCK_MONOTONIC, &ref_time);
867
868 if (forks) {
869 perf_evlist__start_workload(evsel_list);
870 enable_counters();
871
872 if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
873 status = dispatch_events(forks, timeout, interval, ×);
874 if (child_pid != -1) {
875 if (timeout)
876 kill(child_pid, SIGTERM);
877 wait4(child_pid, &status, 0, &stat_config.ru_data);
878 }
879
880 if (workload_exec_errno) {
881 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
882 pr_err("Workload failed: %s\n", emsg);
883 return -1;
884 }
885
886 if (WIFSIGNALED(status))
887 psignal(WTERMSIG(status), argv[0]);
888 } else {
889 enable_counters();
890 status = dispatch_events(forks, timeout, interval, ×);
891 }
892
893 disable_counters();
894
895 t1 = rdclock();
896
897 if (stat_config.walltime_run_table)
898 stat_config.walltime_run[run_idx] = t1 - t0;
899
900 if (interval && stat_config.summary) {
901 stat_config.interval = 0;
902 stat_config.stop_read_counter = true;
903 init_stats(&walltime_nsecs_stats);
904 update_stats(&walltime_nsecs_stats, t1 - t0);
905
906 if (stat_config.aggr_mode == AGGR_GLOBAL)
907 perf_evlist__save_aggr_prev_raw_counts(evsel_list);
908
909 perf_evlist__copy_prev_raw_counts(evsel_list);
910 perf_evlist__reset_prev_raw_counts(evsel_list);
911 runtime_stat_reset(&stat_config);
912 perf_stat__reset_shadow_per_stat(&rt_stat);
913 } else
914 update_stats(&walltime_nsecs_stats, t1 - t0);
915
916 /*
917 * Closing a group leader splits the group, and as we only disable
918 * group leaders, results in remaining events becoming enabled. To
919 * avoid arbitrary skew, we must read all counters before closing any
920 * group leaders.
921 */
922 read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
923
924 /*
925 * We need to keep evsel_list alive, because it's processed
926 * later the evsel_list will be closed after.
927 */
928 if (!STAT_RECORD)
929 evlist__close(evsel_list);
930
931 return WEXITSTATUS(status);
932}
933
934static int run_perf_stat(int argc, const char **argv, int run_idx)
935{
936 int ret;
937
938 if (pre_cmd) {
939 ret = system(pre_cmd);
940 if (ret)
941 return ret;
942 }
943
944 if (sync_run)
945 sync();
946
947 ret = __run_perf_stat(argc, argv, run_idx);
948 if (ret)
949 return ret;
950
951 if (post_cmd) {
952 ret = system(post_cmd);
953 if (ret)
954 return ret;
955 }
956
957 return ret;
958}
959
960static void print_counters(struct timespec *ts, int argc, const char **argv)
961{
962 /* Do not print anything if we record to the pipe. */
963 if (STAT_RECORD && perf_stat.data.is_pipe)
964 return;
965
966 perf_evlist__print_counters(evsel_list, &stat_config, &target,
967 ts, argc, argv);
968}
969
970static volatile int signr = -1;
971
972static void skip_signal(int signo)
973{
974 if ((child_pid == -1) || stat_config.interval)
975 done = 1;
976
977 signr = signo;
978 /*
979 * render child_pid harmless
980 * won't send SIGTERM to a random
981 * process in case of race condition
982 * and fast PID recycling
983 */
984 child_pid = -1;
985}
986
987static void sig_atexit(void)
988{
989 sigset_t set, oset;
990
991 /*
992 * avoid race condition with SIGCHLD handler
993 * in skip_signal() which is modifying child_pid
994 * goal is to avoid send SIGTERM to a random
995 * process
996 */
997 sigemptyset(&set);
998 sigaddset(&set, SIGCHLD);
999 sigprocmask(SIG_BLOCK, &set, &oset);
1000
1001 if (child_pid != -1)
1002 kill(child_pid, SIGTERM);
1003
1004 sigprocmask(SIG_SETMASK, &oset, NULL);
1005
1006 if (signr == -1)
1007 return;
1008
1009 signal(signr, SIG_DFL);
1010 kill(getpid(), signr);
1011}
1012
1013void perf_stat__set_big_num(int set)
1014{
1015 stat_config.big_num = (set != 0);
1016}
1017
1018static int stat__set_big_num(const struct option *opt __maybe_unused,
1019 const char *s __maybe_unused, int unset)
1020{
1021 big_num_opt = unset ? 0 : 1;
1022 perf_stat__set_big_num(!unset);
1023 return 0;
1024}
1025
1026static int enable_metric_only(const struct option *opt __maybe_unused,
1027 const char *s __maybe_unused, int unset)
1028{
1029 force_metric_only = true;
1030 stat_config.metric_only = !unset;
1031 return 0;
1032}
1033
1034static int parse_metric_groups(const struct option *opt,
1035 const char *str,
1036 int unset __maybe_unused)
1037{
1038 return metricgroup__parse_groups(opt, str,
1039 stat_config.metric_no_group,
1040 stat_config.metric_no_merge,
1041 &stat_config.metric_events);
1042}
1043
1044static int parse_control_option(const struct option *opt,
1045 const char *str,
1046 int unset __maybe_unused)
1047{
1048 char *comma = NULL, *endptr = NULL;
1049 struct perf_stat_config *config = (struct perf_stat_config *)opt->value;
1050
1051 if (strncmp(str, "fd:", 3))
1052 return -EINVAL;
1053
1054 config->ctl_fd = strtoul(&str[3], &endptr, 0);
1055 if (endptr == &str[3])
1056 return -EINVAL;
1057
1058 comma = strchr(str, ',');
1059 if (comma) {
1060 if (endptr != comma)
1061 return -EINVAL;
1062
1063 config->ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
1064 if (endptr == comma + 1 || *endptr != '\0')
1065 return -EINVAL;
1066 }
1067
1068 return 0;
1069}
1070
1071static struct option stat_options[] = {
1072 OPT_BOOLEAN('T', "transaction", &transaction_run,
1073 "hardware transaction statistics"),
1074 OPT_CALLBACK('e', "event", &evsel_list, "event",
1075 "event selector. use 'perf list' to list available events",
1076 parse_events_option),
1077 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1078 "event filter", parse_filter),
1079 OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
1080 "child tasks do not inherit counters"),
1081 OPT_STRING('p', "pid", &target.pid, "pid",
1082 "stat events on existing process id"),
1083 OPT_STRING('t', "tid", &target.tid, "tid",
1084 "stat events on existing thread id"),
1085 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1086 "system-wide collection from all CPUs"),
1087 OPT_BOOLEAN('g', "group", &group,
1088 "put the counters into a counter group"),
1089 OPT_BOOLEAN(0, "scale", &stat_config.scale,
1090 "Use --no-scale to disable counter scaling for multiplexing"),
1091 OPT_INCR('v', "verbose", &verbose,
1092 "be more verbose (show counter open errors, etc)"),
1093 OPT_INTEGER('r', "repeat", &stat_config.run_count,
1094 "repeat command and print average + stddev (max: 100, forever: 0)"),
1095 OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
1096 "display details about each run (only with -r option)"),
1097 OPT_BOOLEAN('n', "null", &stat_config.null_run,
1098 "null run - dont start any counters"),
1099 OPT_INCR('d', "detailed", &detailed_run,
1100 "detailed run - start a lot of events"),
1101 OPT_BOOLEAN('S', "sync", &sync_run,
1102 "call sync() before starting a run"),
1103 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1104 "print large numbers with thousands\' separators",
1105 stat__set_big_num),
1106 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1107 "list of cpus to monitor in system-wide"),
1108 OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
1109 "disable CPU count aggregation", AGGR_NONE),
1110 OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
1111 OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
1112 "print counts with custom separator"),
1113 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1114 "monitor event in cgroup name only", parse_cgroups),
1115 OPT_STRING('o', "output", &output_name, "file", "output file name"),
1116 OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
1117 OPT_INTEGER(0, "log-fd", &output_fd,
1118 "log output to fd, instead of stderr"),
1119 OPT_STRING(0, "pre", &pre_cmd, "command",
1120 "command to run prior to the measured command"),
1121 OPT_STRING(0, "post", &post_cmd, "command",
1122 "command to run after to the measured command"),
1123 OPT_UINTEGER('I', "interval-print", &stat_config.interval,
1124 "print counts at regular interval in ms "
1125 "(overhead is possible for values <= 100ms)"),
1126 OPT_INTEGER(0, "interval-count", &stat_config.times,
1127 "print counts for fixed number of times"),
1128 OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
1129 "clear screen in between new interval"),
1130 OPT_UINTEGER(0, "timeout", &stat_config.timeout,
1131 "stop workload and print counts after a timeout period in ms (>= 10ms)"),
1132 OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
1133 "aggregate counts per processor socket", AGGR_SOCKET),
1134 OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
1135 "aggregate counts per processor die", AGGR_DIE),
1136 OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
1137 "aggregate counts per physical processor core", AGGR_CORE),
1138 OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
1139 "aggregate counts per thread", AGGR_THREAD),
1140 OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
1141 "aggregate counts per numa node", AGGR_NODE),
1142 OPT_INTEGER('D', "delay", &stat_config.initial_delay,
1143 "ms to wait before starting measurement after program start (-1: start with events disabled)"),
1144 OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
1145 "Only print computed metrics. No raw values", enable_metric_only),
1146 OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
1147 "don't group metric events, impacts multiplexing"),
1148 OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
1149 "don't try to share events between metrics in a group"),
1150 OPT_BOOLEAN(0, "topdown", &topdown_run,
1151 "measure topdown level 1 statistics"),
1152 OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1153 "measure SMI cost"),
1154 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1155 "monitor specified metrics or metric groups (separated by ,)",
1156 parse_metric_groups),
1157 OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
1158 "Configure all used events to run in kernel space.",
1159 PARSE_OPT_EXCLUSIVE),
1160 OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user,
1161 "Configure all used events to run in user space.",
1162 PARSE_OPT_EXCLUSIVE),
1163 OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread,
1164 "Use with 'percore' event qualifier to show the event "
1165 "counts of one hardware thread by sum up total hardware "
1166 "threads of same physical core"),
1167 OPT_BOOLEAN(0, "summary", &stat_config.summary,
1168 "print summary for interval mode"),
1169#ifdef HAVE_LIBPFM
1170 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1171 "libpfm4 event selector. use 'perf list' to list available events",
1172 parse_libpfm_events_option),
1173#endif
1174 OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd]",
1175 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
1176 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.",
1177 parse_control_option),
1178 OPT_END()
1179};
1180
1181static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
1182 struct perf_cpu_map *map, int cpu)
1183{
1184 return cpu_map__get_socket(map, cpu, NULL);
1185}
1186
1187static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
1188 struct perf_cpu_map *map, int cpu)
1189{
1190 return cpu_map__get_die(map, cpu, NULL);
1191}
1192
1193static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
1194 struct perf_cpu_map *map, int cpu)
1195{
1196 return cpu_map__get_core(map, cpu, NULL);
1197}
1198
1199static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
1200 struct perf_cpu_map *map, int cpu)
1201{
1202 return cpu_map__get_node(map, cpu, NULL);
1203}
1204
1205static int perf_stat__get_aggr(struct perf_stat_config *config,
1206 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
1207{
1208 int cpu;
1209
1210 if (idx >= map->nr)
1211 return -1;
1212
1213 cpu = map->map[idx];
1214
1215 if (config->cpus_aggr_map->map[cpu] == -1)
1216 config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
1217
1218 return config->cpus_aggr_map->map[cpu];
1219}
1220
1221static int perf_stat__get_socket_cached(struct perf_stat_config *config,
1222 struct perf_cpu_map *map, int idx)
1223{
1224 return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
1225}
1226
1227static int perf_stat__get_die_cached(struct perf_stat_config *config,
1228 struct perf_cpu_map *map, int idx)
1229{
1230 return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
1231}
1232
1233static int perf_stat__get_core_cached(struct perf_stat_config *config,
1234 struct perf_cpu_map *map, int idx)
1235{
1236 return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
1237}
1238
1239static int perf_stat__get_node_cached(struct perf_stat_config *config,
1240 struct perf_cpu_map *map, int idx)
1241{
1242 return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
1243}
1244
1245static bool term_percore_set(void)
1246{
1247 struct evsel *counter;
1248
1249 evlist__for_each_entry(evsel_list, counter) {
1250 if (counter->percore)
1251 return true;
1252 }
1253
1254 return false;
1255}
1256
1257static int perf_stat_init_aggr_mode(void)
1258{
1259 int nr;
1260
1261 switch (stat_config.aggr_mode) {
1262 case AGGR_SOCKET:
1263 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1264 perror("cannot build socket map");
1265 return -1;
1266 }
1267 stat_config.aggr_get_id = perf_stat__get_socket_cached;
1268 break;
1269 case AGGR_DIE:
1270 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1271 perror("cannot build die map");
1272 return -1;
1273 }
1274 stat_config.aggr_get_id = perf_stat__get_die_cached;
1275 break;
1276 case AGGR_CORE:
1277 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1278 perror("cannot build core map");
1279 return -1;
1280 }
1281 stat_config.aggr_get_id = perf_stat__get_core_cached;
1282 break;
1283 case AGGR_NODE:
1284 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1285 perror("cannot build core map");
1286 return -1;
1287 }
1288 stat_config.aggr_get_id = perf_stat__get_node_cached;
1289 break;
1290 case AGGR_NONE:
1291 if (term_percore_set()) {
1292 if (cpu_map__build_core_map(evsel_list->core.cpus,
1293 &stat_config.aggr_map)) {
1294 perror("cannot build core map");
1295 return -1;
1296 }
1297 stat_config.aggr_get_id = perf_stat__get_core_cached;
1298 }
1299 break;
1300 case AGGR_GLOBAL:
1301 case AGGR_THREAD:
1302 case AGGR_UNSET:
1303 default:
1304 break;
1305 }
1306
1307 /*
1308 * The evsel_list->cpus is the base we operate on,
1309 * taking the highest cpu number to be the size of
1310 * the aggregation translate cpumap.
1311 */
1312 nr = perf_cpu_map__max(evsel_list->core.cpus);
1313 stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
1314 return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
1315}
1316
1317static void perf_stat__exit_aggr_mode(void)
1318{
1319 perf_cpu_map__put(stat_config.aggr_map);
1320 perf_cpu_map__put(stat_config.cpus_aggr_map);
1321 stat_config.aggr_map = NULL;
1322 stat_config.cpus_aggr_map = NULL;
1323}
1324
1325static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
1326{
1327 int cpu;
1328
1329 if (idx > map->nr)
1330 return -1;
1331
1332 cpu = map->map[idx];
1333
1334 if (cpu >= env->nr_cpus_avail)
1335 return -1;
1336
1337 return cpu;
1338}
1339
1340static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
1341{
1342 struct perf_env *env = data;
1343 int cpu = perf_env__get_cpu(env, map, idx);
1344
1345 return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
1346}
1347
1348static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
1349{
1350 struct perf_env *env = data;
1351 int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
1352
1353 if (cpu != -1) {
1354 /*
1355 * Encode socket in bit range 15:8
1356 * die_id is relative to socket,
1357 * we need a global id. So we combine
1358 * socket + die id
1359 */
1360 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
1361 return -1;
1362
1363 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
1364 return -1;
1365
1366 die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
1367 }
1368
1369 return die_id;
1370}
1371
1372static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
1373{
1374 struct perf_env *env = data;
1375 int core = -1, cpu = perf_env__get_cpu(env, map, idx);
1376
1377 if (cpu != -1) {
1378 /*
1379 * Encode socket in bit range 31:24
1380 * encode die id in bit range 23:16
1381 * core_id is relative to socket and die,
1382 * we need a global id. So we combine
1383 * socket + die id + core id
1384 */
1385 if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
1386 return -1;
1387
1388 if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
1389 return -1;
1390
1391 if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
1392 return -1;
1393
1394 core = (env->cpu[cpu].socket_id << 24) |
1395 (env->cpu[cpu].die_id << 16) |
1396 (env->cpu[cpu].core_id & 0xffff);
1397 }
1398
1399 return core;
1400}
1401
1402static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
1403{
1404 int cpu = perf_env__get_cpu(data, map, idx);
1405
1406 return perf_env__numa_node(data, cpu);
1407}
1408
1409static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
1410 struct perf_cpu_map **sockp)
1411{
1412 return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
1413}
1414
1415static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
1416 struct perf_cpu_map **diep)
1417{
1418 return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
1419}
1420
1421static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
1422 struct perf_cpu_map **corep)
1423{
1424 return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
1425}
1426
1427static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
1428 struct perf_cpu_map **nodep)
1429{
1430 return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
1431}
1432
1433static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
1434 struct perf_cpu_map *map, int idx)
1435{
1436 return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
1437}
1438static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
1439 struct perf_cpu_map *map, int idx)
1440{
1441 return perf_env__get_die(map, idx, &perf_stat.session->header.env);
1442}
1443
1444static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
1445 struct perf_cpu_map *map, int idx)
1446{
1447 return perf_env__get_core(map, idx, &perf_stat.session->header.env);
1448}
1449
1450static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
1451 struct perf_cpu_map *map, int idx)
1452{
1453 return perf_env__get_node(map, idx, &perf_stat.session->header.env);
1454}
1455
1456static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
1457{
1458 struct perf_env *env = &st->session->header.env;
1459
1460 switch (stat_config.aggr_mode) {
1461 case AGGR_SOCKET:
1462 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1463 perror("cannot build socket map");
1464 return -1;
1465 }
1466 stat_config.aggr_get_id = perf_stat__get_socket_file;
1467 break;
1468 case AGGR_DIE:
1469 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1470 perror("cannot build die map");
1471 return -1;
1472 }
1473 stat_config.aggr_get_id = perf_stat__get_die_file;
1474 break;
1475 case AGGR_CORE:
1476 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1477 perror("cannot build core map");
1478 return -1;
1479 }
1480 stat_config.aggr_get_id = perf_stat__get_core_file;
1481 break;
1482 case AGGR_NODE:
1483 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1484 perror("cannot build core map");
1485 return -1;
1486 }
1487 stat_config.aggr_get_id = perf_stat__get_node_file;
1488 break;
1489 case AGGR_NONE:
1490 case AGGR_GLOBAL:
1491 case AGGR_THREAD:
1492 case AGGR_UNSET:
1493 default:
1494 break;
1495 }
1496
1497 return 0;
1498}
1499
1500static int topdown_filter_events(const char **attr, char **str, bool use_group)
1501{
1502 int off = 0;
1503 int i;
1504 int len = 0;
1505 char *s;
1506
1507 for (i = 0; attr[i]; i++) {
1508 if (pmu_have_event("cpu", attr[i])) {
1509 len += strlen(attr[i]) + 1;
1510 attr[i - off] = attr[i];
1511 } else
1512 off++;
1513 }
1514 attr[i - off] = NULL;
1515
1516 *str = malloc(len + 1 + 2);
1517 if (!*str)
1518 return -1;
1519 s = *str;
1520 if (i - off == 0) {
1521 *s = 0;
1522 return 0;
1523 }
1524 if (use_group)
1525 *s++ = '{';
1526 for (i = 0; attr[i]; i++) {
1527 strcpy(s, attr[i]);
1528 s += strlen(s);
1529 *s++ = ',';
1530 }
1531 if (use_group) {
1532 s[-1] = '}';
1533 *s = 0;
1534 } else
1535 s[-1] = 0;
1536 return 0;
1537}
1538
1539__weak bool arch_topdown_check_group(bool *warn)
1540{
1541 *warn = false;
1542 return false;
1543}
1544
1545__weak void arch_topdown_group_warn(void)
1546{
1547}
1548
1549/*
1550 * Add default attributes, if there were no attributes specified or
1551 * if -d/--detailed, -d -d or -d -d -d is used:
1552 */
1553static int add_default_attributes(void)
1554{
1555 int err;
1556 struct perf_event_attr default_attrs0[] = {
1557
1558 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
1559 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
1560 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
1561 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
1562
1563 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
1564};
1565 struct perf_event_attr frontend_attrs[] = {
1566 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
1567};
1568 struct perf_event_attr backend_attrs[] = {
1569 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
1570};
1571 struct perf_event_attr default_attrs1[] = {
1572 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
1573 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
1574 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
1575
1576};
1577
1578/*
1579 * Detailed stats (-d), covering the L1 and last level data caches:
1580 */
1581 struct perf_event_attr detailed_attrs[] = {
1582
1583 { .type = PERF_TYPE_HW_CACHE,
1584 .config =
1585 PERF_COUNT_HW_CACHE_L1D << 0 |
1586 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1587 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1588
1589 { .type = PERF_TYPE_HW_CACHE,
1590 .config =
1591 PERF_COUNT_HW_CACHE_L1D << 0 |
1592 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1593 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1594
1595 { .type = PERF_TYPE_HW_CACHE,
1596 .config =
1597 PERF_COUNT_HW_CACHE_LL << 0 |
1598 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1599 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1600
1601 { .type = PERF_TYPE_HW_CACHE,
1602 .config =
1603 PERF_COUNT_HW_CACHE_LL << 0 |
1604 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1605 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1606};
1607
1608/*
1609 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1610 */
1611 struct perf_event_attr very_detailed_attrs[] = {
1612
1613 { .type = PERF_TYPE_HW_CACHE,
1614 .config =
1615 PERF_COUNT_HW_CACHE_L1I << 0 |
1616 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1617 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1618
1619 { .type = PERF_TYPE_HW_CACHE,
1620 .config =
1621 PERF_COUNT_HW_CACHE_L1I << 0 |
1622 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1623 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1624
1625 { .type = PERF_TYPE_HW_CACHE,
1626 .config =
1627 PERF_COUNT_HW_CACHE_DTLB << 0 |
1628 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1629 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1630
1631 { .type = PERF_TYPE_HW_CACHE,
1632 .config =
1633 PERF_COUNT_HW_CACHE_DTLB << 0 |
1634 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1635 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1636
1637 { .type = PERF_TYPE_HW_CACHE,
1638 .config =
1639 PERF_COUNT_HW_CACHE_ITLB << 0 |
1640 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1641 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1642
1643 { .type = PERF_TYPE_HW_CACHE,
1644 .config =
1645 PERF_COUNT_HW_CACHE_ITLB << 0 |
1646 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1647 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1648
1649};
1650
1651/*
1652 * Very, very detailed stats (-d -d -d), adding prefetch events:
1653 */
1654 struct perf_event_attr very_very_detailed_attrs[] = {
1655
1656 { .type = PERF_TYPE_HW_CACHE,
1657 .config =
1658 PERF_COUNT_HW_CACHE_L1D << 0 |
1659 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
1660 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1661
1662 { .type = PERF_TYPE_HW_CACHE,
1663 .config =
1664 PERF_COUNT_HW_CACHE_L1D << 0 |
1665 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
1666 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1667};
1668 struct parse_events_error errinfo;
1669
1670 /* Set attrs if no event is selected and !null_run: */
1671 if (stat_config.null_run)
1672 return 0;
1673
1674 bzero(&errinfo, sizeof(errinfo));
1675 if (transaction_run) {
1676 /* Handle -T as -M transaction. Once platform specific metrics
1677 * support has been added to the json files, all archictures
1678 * will use this approach. To determine transaction support
1679 * on an architecture test for such a metric name.
1680 */
1681 if (metricgroup__has_metric("transaction")) {
1682 struct option opt = { .value = &evsel_list };
1683
1684 return metricgroup__parse_groups(&opt, "transaction",
1685 stat_config.metric_no_group,
1686 stat_config.metric_no_merge,
1687 &stat_config.metric_events);
1688 }
1689
1690 if (pmu_have_event("cpu", "cycles-ct") &&
1691 pmu_have_event("cpu", "el-start"))
1692 err = parse_events(evsel_list, transaction_attrs,
1693 &errinfo);
1694 else
1695 err = parse_events(evsel_list,
1696 transaction_limited_attrs,
1697 &errinfo);
1698 if (err) {
1699 fprintf(stderr, "Cannot set up transaction events\n");
1700 parse_events_print_error(&errinfo, transaction_attrs);
1701 return -1;
1702 }
1703 return 0;
1704 }
1705
1706 if (smi_cost) {
1707 int smi;
1708
1709 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
1710 fprintf(stderr, "freeze_on_smi is not supported.\n");
1711 return -1;
1712 }
1713
1714 if (!smi) {
1715 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
1716 fprintf(stderr, "Failed to set freeze_on_smi.\n");
1717 return -1;
1718 }
1719 smi_reset = true;
1720 }
1721
1722 if (pmu_have_event("msr", "aperf") &&
1723 pmu_have_event("msr", "smi")) {
1724 if (!force_metric_only)
1725 stat_config.metric_only = true;
1726 err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
1727 } else {
1728 fprintf(stderr, "To measure SMI cost, it needs "
1729 "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
1730 parse_events_print_error(&errinfo, smi_cost_attrs);
1731 return -1;
1732 }
1733 if (err) {
1734 parse_events_print_error(&errinfo, smi_cost_attrs);
1735 fprintf(stderr, "Cannot set up SMI cost events\n");
1736 return -1;
1737 }
1738 return 0;
1739 }
1740
1741 if (topdown_run) {
1742 char *str = NULL;
1743 bool warn = false;
1744
1745 if (stat_config.aggr_mode != AGGR_GLOBAL &&
1746 stat_config.aggr_mode != AGGR_CORE) {
1747 pr_err("top down event configuration requires --per-core mode\n");
1748 return -1;
1749 }
1750 stat_config.aggr_mode = AGGR_CORE;
1751 if (nr_cgroups || !target__has_cpu(&target)) {
1752 pr_err("top down event configuration requires system-wide mode (-a)\n");
1753 return -1;
1754 }
1755
1756 if (!force_metric_only)
1757 stat_config.metric_only = true;
1758 if (topdown_filter_events(topdown_attrs, &str,
1759 arch_topdown_check_group(&warn)) < 0) {
1760 pr_err("Out of memory\n");
1761 return -1;
1762 }
1763 if (topdown_attrs[0] && str) {
1764 if (warn)
1765 arch_topdown_group_warn();
1766 err = parse_events(evsel_list, str, &errinfo);
1767 if (err) {
1768 fprintf(stderr,
1769 "Cannot set up top down events %s: %d\n",
1770 str, err);
1771 parse_events_print_error(&errinfo, str);
1772 free(str);
1773 return -1;
1774 }
1775 } else {
1776 fprintf(stderr, "System does not support topdown\n");
1777 return -1;
1778 }
1779 free(str);
1780 }
1781
1782 if (!evsel_list->core.nr_entries) {
1783 if (target__has_cpu(&target))
1784 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1785
1786 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1787 return -1;
1788 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
1789 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
1790 return -1;
1791 }
1792 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
1793 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
1794 return -1;
1795 }
1796 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
1797 return -1;
1798 }
1799
1800 /* Detailed events get appended to the event list: */
1801
1802 if (detailed_run < 1)
1803 return 0;
1804
1805 /* Append detailed run extra attributes: */
1806 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1807 return -1;
1808
1809 if (detailed_run < 2)
1810 return 0;
1811
1812 /* Append very detailed run extra attributes: */
1813 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1814 return -1;
1815
1816 if (detailed_run < 3)
1817 return 0;
1818
1819 /* Append very, very detailed run extra attributes: */
1820 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1821}
1822
1823static const char * const stat_record_usage[] = {
1824 "perf stat record [<options>]",
1825 NULL,
1826};
1827
1828static void init_features(struct perf_session *session)
1829{
1830 int feat;
1831
1832 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1833 perf_header__set_feat(&session->header, feat);
1834
1835 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1836 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1837 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1838 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1839 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1840}
1841
1842static int __cmd_record(int argc, const char **argv)
1843{
1844 struct perf_session *session;
1845 struct perf_data *data = &perf_stat.data;
1846
1847 argc = parse_options(argc, argv, stat_options, stat_record_usage,
1848 PARSE_OPT_STOP_AT_NON_OPTION);
1849
1850 if (output_name)
1851 data->path = output_name;
1852
1853 if (stat_config.run_count != 1 || forever) {
1854 pr_err("Cannot use -r option with perf stat record.\n");
1855 return -1;
1856 }
1857
1858 session = perf_session__new(data, false, NULL);
1859 if (IS_ERR(session)) {
1860 pr_err("Perf session creation failed\n");
1861 return PTR_ERR(session);
1862 }
1863
1864 init_features(session);
1865
1866 session->evlist = evsel_list;
1867 perf_stat.session = session;
1868 perf_stat.record = true;
1869 return argc;
1870}
1871
1872static int process_stat_round_event(struct perf_session *session,
1873 union perf_event *event)
1874{
1875 struct perf_record_stat_round *stat_round = &event->stat_round;
1876 struct evsel *counter;
1877 struct timespec tsh, *ts = NULL;
1878 const char **argv = session->header.env.cmdline_argv;
1879 int argc = session->header.env.nr_cmdline;
1880
1881 evlist__for_each_entry(evsel_list, counter)
1882 perf_stat_process_counter(&stat_config, counter);
1883
1884 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
1885 update_stats(&walltime_nsecs_stats, stat_round->time);
1886
1887 if (stat_config.interval && stat_round->time) {
1888 tsh.tv_sec = stat_round->time / NSEC_PER_SEC;
1889 tsh.tv_nsec = stat_round->time % NSEC_PER_SEC;
1890 ts = &tsh;
1891 }
1892
1893 print_counters(ts, argc, argv);
1894 return 0;
1895}
1896
1897static
1898int process_stat_config_event(struct perf_session *session,
1899 union perf_event *event)
1900{
1901 struct perf_tool *tool = session->tool;
1902 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1903
1904 perf_event__read_stat_config(&stat_config, &event->stat_config);
1905
1906 if (perf_cpu_map__empty(st->cpus)) {
1907 if (st->aggr_mode != AGGR_UNSET)
1908 pr_warning("warning: processing task data, aggregation mode not set\n");
1909 return 0;
1910 }
1911
1912 if (st->aggr_mode != AGGR_UNSET)
1913 stat_config.aggr_mode = st->aggr_mode;
1914
1915 if (perf_stat.data.is_pipe)
1916 perf_stat_init_aggr_mode();
1917 else
1918 perf_stat_init_aggr_mode_file(st);
1919
1920 return 0;
1921}
1922
1923static int set_maps(struct perf_stat *st)
1924{
1925 if (!st->cpus || !st->threads)
1926 return 0;
1927
1928 if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
1929 return -EINVAL;
1930
1931 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
1932
1933 if (perf_evlist__alloc_stats(evsel_list, true))
1934 return -ENOMEM;
1935
1936 st->maps_allocated = true;
1937 return 0;
1938}
1939
1940static
1941int process_thread_map_event(struct perf_session *session,
1942 union perf_event *event)
1943{
1944 struct perf_tool *tool = session->tool;
1945 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1946
1947 if (st->threads) {
1948 pr_warning("Extra thread map event, ignoring.\n");
1949 return 0;
1950 }
1951
1952 st->threads = thread_map__new_event(&event->thread_map);
1953 if (!st->threads)
1954 return -ENOMEM;
1955
1956 return set_maps(st);
1957}
1958
1959static
1960int process_cpu_map_event(struct perf_session *session,
1961 union perf_event *event)
1962{
1963 struct perf_tool *tool = session->tool;
1964 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
1965 struct perf_cpu_map *cpus;
1966
1967 if (st->cpus) {
1968 pr_warning("Extra cpu map event, ignoring.\n");
1969 return 0;
1970 }
1971
1972 cpus = cpu_map__new_data(&event->cpu_map.data);
1973 if (!cpus)
1974 return -ENOMEM;
1975
1976 st->cpus = cpus;
1977 return set_maps(st);
1978}
1979
1980static const char * const stat_report_usage[] = {
1981 "perf stat report [<options>]",
1982 NULL,
1983};
1984
1985static struct perf_stat perf_stat = {
1986 .tool = {
1987 .attr = perf_event__process_attr,
1988 .event_update = perf_event__process_event_update,
1989 .thread_map = process_thread_map_event,
1990 .cpu_map = process_cpu_map_event,
1991 .stat_config = process_stat_config_event,
1992 .stat = perf_event__process_stat_event,
1993 .stat_round = process_stat_round_event,
1994 },
1995 .aggr_mode = AGGR_UNSET,
1996};
1997
1998static int __cmd_report(int argc, const char **argv)
1999{
2000 struct perf_session *session;
2001 const struct option options[] = {
2002 OPT_STRING('i', "input", &input_name, "file", "input file name"),
2003 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
2004 "aggregate counts per processor socket", AGGR_SOCKET),
2005 OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
2006 "aggregate counts per processor die", AGGR_DIE),
2007 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
2008 "aggregate counts per physical processor core", AGGR_CORE),
2009 OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode,
2010 "aggregate counts per numa node", AGGR_NODE),
2011 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
2012 "disable CPU count aggregation", AGGR_NONE),
2013 OPT_END()
2014 };
2015 struct stat st;
2016 int ret;
2017
2018 argc = parse_options(argc, argv, options, stat_report_usage, 0);
2019
2020 if (!input_name || !strlen(input_name)) {
2021 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2022 input_name = "-";
2023 else
2024 input_name = "perf.data";
2025 }
2026
2027 perf_stat.data.path = input_name;
2028 perf_stat.data.mode = PERF_DATA_MODE_READ;
2029
2030 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
2031 if (IS_ERR(session))
2032 return PTR_ERR(session);
2033
2034 perf_stat.session = session;
2035 stat_config.output = stderr;
2036 evsel_list = session->evlist;
2037
2038 ret = perf_session__process_events(session);
2039 if (ret)
2040 return ret;
2041
2042 perf_session__delete(session);
2043 return 0;
2044}
2045
2046static void setup_system_wide(int forks)
2047{
2048 /*
2049 * Make system wide (-a) the default target if
2050 * no target was specified and one of following
2051 * conditions is met:
2052 *
2053 * - there's no workload specified
2054 * - there is workload specified but all requested
2055 * events are system wide events
2056 */
2057 if (!target__none(&target))
2058 return;
2059
2060 if (!forks)
2061 target.system_wide = true;
2062 else {
2063 struct evsel *counter;
2064
2065 evlist__for_each_entry(evsel_list, counter) {
2066 if (!counter->core.system_wide)
2067 return;
2068 }
2069
2070 if (evsel_list->core.nr_entries)
2071 target.system_wide = true;
2072 }
2073}
2074
2075int cmd_stat(int argc, const char **argv)
2076{
2077 const char * const stat_usage[] = {
2078 "perf stat [<options>] [<command>]",
2079 NULL
2080 };
2081 int status = -EINVAL, run_idx;
2082 const char *mode;
2083 FILE *output = stderr;
2084 unsigned int interval, timeout;
2085 const char * const stat_subcommands[] = { "record", "report" };
2086
2087 setlocale(LC_ALL, "");
2088
2089 evsel_list = evlist__new();
2090 if (evsel_list == NULL)
2091 return -ENOMEM;
2092
2093 parse_events__shrink_config_terms();
2094
2095 /* String-parsing callback-based options would segfault when negated */
2096 set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
2097 set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
2098 set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
2099
2100 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
2101 (const char **) stat_usage,
2102 PARSE_OPT_STOP_AT_NON_OPTION);
2103 perf_stat__collect_metric_expr(evsel_list);
2104 perf_stat__init_shadow_stats();
2105
2106 if (stat_config.csv_sep) {
2107 stat_config.csv_output = true;
2108 if (!strcmp(stat_config.csv_sep, "\\t"))
2109 stat_config.csv_sep = "\t";
2110 } else
2111 stat_config.csv_sep = DEFAULT_SEPARATOR;
2112
2113 if (argc && !strncmp(argv[0], "rec", 3)) {
2114 argc = __cmd_record(argc, argv);
2115 if (argc < 0)
2116 return -1;
2117 } else if (argc && !strncmp(argv[0], "rep", 3))
2118 return __cmd_report(argc, argv);
2119
2120 interval = stat_config.interval;
2121 timeout = stat_config.timeout;
2122
2123 /*
2124 * For record command the -o is already taken care of.
2125 */
2126 if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
2127 output = NULL;
2128
2129 if (output_name && output_fd) {
2130 fprintf(stderr, "cannot use both --output and --log-fd\n");
2131 parse_options_usage(stat_usage, stat_options, "o", 1);
2132 parse_options_usage(NULL, stat_options, "log-fd", 0);
2133 goto out;
2134 }
2135
2136 if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
2137 fprintf(stderr, "--metric-only is not supported with --per-thread\n");
2138 goto out;
2139 }
2140
2141 if (stat_config.metric_only && stat_config.run_count > 1) {
2142 fprintf(stderr, "--metric-only is not supported with -r\n");
2143 goto out;
2144 }
2145
2146 if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
2147 fprintf(stderr, "--table is only supported with -r\n");
2148 parse_options_usage(stat_usage, stat_options, "r", 1);
2149 parse_options_usage(NULL, stat_options, "table", 0);
2150 goto out;
2151 }
2152
2153 if (output_fd < 0) {
2154 fprintf(stderr, "argument to --log-fd must be a > 0\n");
2155 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
2156 goto out;
2157 }
2158
2159 if (!output) {
2160 struct timespec tm;
2161 mode = append_file ? "a" : "w";
2162
2163 output = fopen(output_name, mode);
2164 if (!output) {
2165 perror("failed to create output file");
2166 return -1;
2167 }
2168 clock_gettime(CLOCK_REALTIME, &tm);
2169 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
2170 } else if (output_fd > 0) {
2171 mode = append_file ? "a" : "w";
2172 output = fdopen(output_fd, mode);
2173 if (!output) {
2174 perror("Failed opening logfd");
2175 return -errno;
2176 }
2177 }
2178
2179 stat_config.output = output;
2180
2181 /*
2182 * let the spreadsheet do the pretty-printing
2183 */
2184 if (stat_config.csv_output) {
2185 /* User explicitly passed -B? */
2186 if (big_num_opt == 1) {
2187 fprintf(stderr, "-B option not supported with -x\n");
2188 parse_options_usage(stat_usage, stat_options, "B", 1);
2189 parse_options_usage(NULL, stat_options, "x", 1);
2190 goto out;
2191 } else /* Nope, so disable big number formatting */
2192 stat_config.big_num = false;
2193 } else if (big_num_opt == 0) /* User passed --no-big-num */
2194 stat_config.big_num = false;
2195
2196 setup_system_wide(argc);
2197
2198 /*
2199 * Display user/system times only for single
2200 * run and when there's specified tracee.
2201 */
2202 if ((stat_config.run_count == 1) && target__none(&target))
2203 stat_config.ru_display = true;
2204
2205 if (stat_config.run_count < 0) {
2206 pr_err("Run count must be a positive number\n");
2207 parse_options_usage(stat_usage, stat_options, "r", 1);
2208 goto out;
2209 } else if (stat_config.run_count == 0) {
2210 forever = true;
2211 stat_config.run_count = 1;
2212 }
2213
2214 if (stat_config.walltime_run_table) {
2215 stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
2216 if (!stat_config.walltime_run) {
2217 pr_err("failed to setup -r option");
2218 goto out;
2219 }
2220 }
2221
2222 if ((stat_config.aggr_mode == AGGR_THREAD) &&
2223 !target__has_task(&target)) {
2224 if (!target.system_wide || target.cpu_list) {
2225 fprintf(stderr, "The --per-thread option is only "
2226 "available when monitoring via -p -t -a "
2227 "options or only --per-thread.\n");
2228 parse_options_usage(NULL, stat_options, "p", 1);
2229 parse_options_usage(NULL, stat_options, "t", 1);
2230 goto out;
2231 }
2232 }
2233
2234 /*
2235 * no_aggr, cgroup are for system-wide only
2236 * --per-thread is aggregated per thread, we dont mix it with cpu mode
2237 */
2238 if (((stat_config.aggr_mode != AGGR_GLOBAL &&
2239 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
2240 !target__has_cpu(&target)) {
2241 fprintf(stderr, "both cgroup and no-aggregation "
2242 "modes only available in system-wide mode\n");
2243
2244 parse_options_usage(stat_usage, stat_options, "G", 1);
2245 parse_options_usage(NULL, stat_options, "A", 1);
2246 parse_options_usage(NULL, stat_options, "a", 1);
2247 goto out;
2248 }
2249
2250 if (add_default_attributes())
2251 goto out;
2252
2253 target__validate(&target);
2254
2255 if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
2256 target.per_thread = true;
2257
2258 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
2259 if (target__has_task(&target)) {
2260 pr_err("Problems finding threads of monitor\n");
2261 parse_options_usage(stat_usage, stat_options, "p", 1);
2262 parse_options_usage(NULL, stat_options, "t", 1);
2263 } else if (target__has_cpu(&target)) {
2264 perror("failed to parse CPUs map");
2265 parse_options_usage(stat_usage, stat_options, "C", 1);
2266 parse_options_usage(NULL, stat_options, "a", 1);
2267 }
2268 goto out;
2269 }
2270
2271 evlist__check_cpu_maps(evsel_list);
2272
2273 /*
2274 * Initialize thread_map with comm names,
2275 * so we could print it out on output.
2276 */
2277 if (stat_config.aggr_mode == AGGR_THREAD) {
2278 thread_map__read_comms(evsel_list->core.threads);
2279 if (target.system_wide) {
2280 if (runtime_stat_new(&stat_config,
2281 perf_thread_map__nr(evsel_list->core.threads))) {
2282 goto out;
2283 }
2284 }
2285 }
2286
2287 if (stat_config.aggr_mode == AGGR_NODE)
2288 cpu__setup_cpunode_map();
2289
2290 if (stat_config.times && interval)
2291 interval_count = true;
2292 else if (stat_config.times && !interval) {
2293 pr_err("interval-count option should be used together with "
2294 "interval-print.\n");
2295 parse_options_usage(stat_usage, stat_options, "interval-count", 0);
2296 parse_options_usage(stat_usage, stat_options, "I", 1);
2297 goto out;
2298 }
2299
2300 if (timeout && timeout < 100) {
2301 if (timeout < 10) {
2302 pr_err("timeout must be >= 10ms.\n");
2303 parse_options_usage(stat_usage, stat_options, "timeout", 0);
2304 goto out;
2305 } else
2306 pr_warning("timeout < 100ms. "
2307 "The overhead percentage could be high in some cases. "
2308 "Please proceed with caution.\n");
2309 }
2310 if (timeout && interval) {
2311 pr_err("timeout option is not supported with interval-print.\n");
2312 parse_options_usage(stat_usage, stat_options, "timeout", 0);
2313 parse_options_usage(stat_usage, stat_options, "I", 1);
2314 goto out;
2315 }
2316
2317 if (perf_evlist__alloc_stats(evsel_list, interval))
2318 goto out;
2319
2320 if (perf_stat_init_aggr_mode())
2321 goto out;
2322
2323 /*
2324 * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
2325 * while avoiding that older tools show confusing messages.
2326 *
2327 * However for pipe sessions we need to keep it zero,
2328 * because script's perf_evsel__check_attr is triggered
2329 * by attr->sample_type != 0, and we can't run it on
2330 * stat sessions.
2331 */
2332 stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
2333
2334 /*
2335 * We dont want to block the signals - that would cause
2336 * child tasks to inherit that and Ctrl-C would not work.
2337 * What we want is for Ctrl-C to work in the exec()-ed
2338 * task, but being ignored by perf stat itself:
2339 */
2340 atexit(sig_atexit);
2341 if (!forever)
2342 signal(SIGINT, skip_signal);
2343 signal(SIGCHLD, skip_signal);
2344 signal(SIGALRM, skip_signal);
2345 signal(SIGABRT, skip_signal);
2346
2347 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2348 goto out;
2349
2350 status = 0;
2351 for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
2352 if (stat_config.run_count != 1 && verbose > 0)
2353 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
2354 run_idx + 1);
2355
2356 if (run_idx != 0)
2357 perf_evlist__reset_prev_raw_counts(evsel_list);
2358
2359 status = run_perf_stat(argc, argv, run_idx);
2360 if (forever && status != -1 && !interval) {
2361 print_counters(NULL, argc, argv);
2362 perf_stat__reset_stats();
2363 }
2364 }
2365
2366 if (!forever && status != -1 && (!interval || stat_config.summary))
2367 print_counters(NULL, argc, argv);
2368
2369 evlist__finalize_ctlfd(evsel_list);
2370
2371 if (STAT_RECORD) {
2372 /*
2373 * We synthesize the kernel mmap record just so that older tools
2374 * don't emit warnings about not being able to resolve symbols
2375 * due to /proc/sys/kernel/kptr_restrict settings and instear provide
2376 * a saner message about no samples being in the perf.data file.
2377 *
2378 * This also serves to suppress a warning about f_header.data.size == 0
2379 * in header.c at the moment 'perf stat record' gets introduced, which
2380 * is not really needed once we start adding the stat specific PERF_RECORD_
2381 * records, but the need to suppress the kptr_restrict messages in older
2382 * tools remain -acme
2383 */
2384 int fd = perf_data__fd(&perf_stat.data);
2385 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
2386 process_synthesized_event,
2387 &perf_stat.session->machines.host);
2388 if (err) {
2389 pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
2390 "older tools may produce warnings about this file\n.");
2391 }
2392
2393 if (!interval) {
2394 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
2395 pr_err("failed to write stat round event\n");
2396 }
2397
2398 if (!perf_stat.data.is_pipe) {
2399 perf_stat.session->header.data_size += perf_stat.bytes_written;
2400 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2401 }
2402
2403 evlist__close(evsel_list);
2404 perf_session__delete(perf_stat.session);
2405 }
2406
2407 perf_stat__exit_aggr_mode();
2408 perf_evlist__free_stats(evsel_list);
2409out:
2410 zfree(&stat_config.walltime_run);
2411
2412 if (smi_cost && smi_reset)
2413 sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
2414
2415 evlist__delete(evsel_list);
2416
2417 metricgroup__rblist_exit(&stat_config.metric_events);
2418 runtime_stat_delete(&stat_config);
2419
2420 return status;
2421}