// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
static bool have_frontend_stalled;

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;

struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu;
	struct runtime_stat *stat;
	struct stats stats;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;

	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. For shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;

		return 1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}
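/*
 * Illustrative lookup keys (a sketch, not code from this file): a
 * per-thread shadow stat entry is keyed roughly as
 *	{ .evsel = NULL, .cpu = 0, .type = STAT_CYCLES,
 *	  .ctx = CTX_BIT_KERNEL, .stat = st }
 * while a generic metric entry for the same CPU looks like
 *	{ .evsel = counter, .cpu = 0, .type = STAT_NONE, .ctx = 0 }.
 * saved_value_cmp() orders first by cpu, then by type/ctx, and the
 * .stat pointer is only compared when both evsels are NULL.
 */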

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}

static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
	};

	rblist = &st->value_list;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}
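/*
 * Sketch of the expected lifecycle for a caller-owned runtime_stat
 * (for example one instance per monitored thread), using only the
 * functions defined in this file:
 *
 *	struct runtime_stat st;
 *
 *	runtime_stat__init(&st);
 *	...
 *	perf_stat__update_shadow_stats(counter, count, cpu, &st);
 *	...
 *	runtime_stat__exit(&st);
 */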

void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}

void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	runtime_stat__init(&rt_stat);
}

static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}
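/*
 * For example, an event opened with exclude_kernel and exclude_hv set
 * (i.e. counting user space only) gets ctx = CTX_BIT_KERNEL | CTX_BIT_HV,
 * so its shadow stats are never mixed with those of an all-context event.
 */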

static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}

static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int ctx, int cpu, u64 count)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
						   type, ctx, st);

	if (v)
		update_stats(&v->stats, count);
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
				    int cpu, struct runtime_stat *st)
{
	int ctx = evsel_context(counter);

	count *= counter->scale;

	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
		update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, ctx, cpu, count);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true,
							   STAT_NONE, 0, st);
		/* The lookup can fail on allocation failure; don't deref NULL. */
		if (v)
			update_stats(&v->stats, count);
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
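/*
 * For example, a 12.5% cache-miss ratio falls between the 10.0 and 20.0
 * GRC_CACHE_MISSES thresholds above, so it is printed in magenta;
 * anything above 20% turns red, above 5% yellow, and at or below 5%
 * stays in the normal color.
 */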

static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name))
			return c2;
	}
	return NULL;
}

/* Mark the events referenced by each MetricExpr and link the referencing event to them. */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
					     &metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(num_metric_names + 1,
					       sizeof(struct perf_evsel *));
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i])) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		free(metric_names);
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}
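/*
 * Example of the failure mode handled above: if a metric expression
 * needs both "instructions" and "cycles" but the user only asked for
 * "instructions", the loop prints something like
 *	Add cycles event to groups to get metric expression for instructions
 * and drops the metric instead of computing it from a missing value.
 */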

static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return v->stats.n;
}

static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}

static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}

static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}

static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}

static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is one issue opportunity per cycle per unit of pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as far as
 * possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
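
/*
 * Worked Level 1 example (illustrative numbers only), for a 4-wide
 * pipeline over 1000 cycles, so TotalSlots = 4000. Suppose
 * SlotsRetired = 2000, SlotsIssued = 2400, RecoveryBubbles = 200 and
 * FetchBubbles = 800, then:
 *
 *	Retiring       = 2000 / 4000			= 50%
 *	BadSpeculation = ((2400 - 2000) + 200) / 4000	= 15%
 *	FrontendBound  = 800 / 4000			= 20%
 *	BackendBound   = 1 - 0.15 - 0.5 - 0.2		= 15%
 */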

/*
 * Clamp small negative values to zero: the TopDown fractions can come
 * out slightly below zero due to measurement skew between the
 * underlying counters, and should then be treated as zero.
 */
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}

static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);

	total_slots = td_total_slots(ctx, cpu, st);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    ctx, cpu);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    ctx, cpu);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double sum = (td_fe_bound(ctx, cpu, st) +
		      td_bad_spec(ctx, cpu, st) +
		      td_retiring(ctx, cpu, st));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

static void print_smi_cost(int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}

static void generic_metric(const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		double scale;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false,
					       STAT_NONE, 0, st);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;
		}
		expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats)*scale);
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(ctxp, NULL, "%8.1f",
				     metric_name ?
				     metric_name :
				     out->force_header ? name : "",
				     ratio);
		else
			print_metric(ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(ctxp, NULL, NULL, "", 0);
}
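/*
 * Evaluation sketch: for a hypothetical MetricExpr "instructions / cycles"
 * attached to the "instructions" event, generic_metric() seeds the parse
 * context with the event's own average ("instructions" -> avg) plus the
 * averaged shadow value of every linked event ("cycles"), then hands the
 * expression string to expr__parse(). If any referenced event has no
 * saved value yet, the loop breaks early and an empty metric is printed
 * instead of a wrong one.
 */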

void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
					 ctx, cpu);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    ctx, cpu));

		if (total && avg) {
			out->new_line(ctxp);
			ratio = total / avg;
			print_metric(ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		} else if (have_frontend_stalled) {
			print_metric(ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
			print_branch_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
			print_l1_dcache_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
			print_l1_icache_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
			print_dtlb_cache_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
			print_itlb_cache_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
			print_ll_cache_misses(cpu, evsel, avg, out, st);
		else
			print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
			print_metric(ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
872 print_metric(ctxp, NULL, NULL, "Ghz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total)
			print_metric(ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2-avg) / total));
		else
			print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
			print_metric(ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / ratio);
		else
			print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu, st);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu, st);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu, st);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu, st);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu, st) > 0)
			print_metric(ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
			       evsel->metric_name, avg, cpu, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(cpu, evsel, out, st);
	} else {
		num = 0;
	}

	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(ctxp);
			generic_metric(mexp->metric_expr, mexp->metric_events,
				       evsel->name, mexp->metric_name,
				       avg, cpu, out, st);
		}
	}
	if (num == 0)
		print_metric(ctxp, NULL, NULL, NULL, 0);
}