// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E
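
/*
 * Illustrative note (not part of the upstream table): with the E()
 * macro above, the first entry expands to
 *
 *	{ .tag = "ldlat-loads",
 *	  .name = "cpu/mem-loads,ldlat=%u/P",
 *	  .sysfs_name = "cpu/events/mem-loads" }
 *
 * .name is a format string; its %u is filled in later with
 * perf_mem_events__loads_ldlat (see perf_mem_events__name()).
 */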

static char mem_loads_name[100];
static bool mem_loads_name__init;

struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
	if (i >= PERF_MEM_EVENTS__MAX)
		return NULL;

	return &perf_mem_events[i];
}

const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
{
	struct perf_mem_event *e = perf_mem_events__ptr(i);

	if (!e)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  e->name, perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return e->name;
}
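
/*
 * Example (derived from the table above): with the default
 * perf_mem_events__loads_ldlat of 30, the PERF_MEM_EVENTS__LOAD name
 * formats to "cpu/mem-loads,ldlat=30/P". The formatted string is
 * cached in mem_loads_name, so later changes to the ldlat value do
 * not re-format it.
 */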

__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
	return false;
}
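
/*
 * Note (added for context): the __weak definitions above are meant to
 * be overridden by arch-specific code; e.g. x86 provides its own
 * versions to handle PMUs where mem-loads must be paired with an
 * auxiliary leader event.
 */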

int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_mem_events__ptr(j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}
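
/*
 * Usage sketch (hypothetical caller, e.g. the handler for
 * "perf mem record -t <types>"):
 *
 *	if (perf_mem_events__parse("ldlat-loads,ldlat-stores"))
 *		return -1;
 *
 * Each comma-separated token is matched as a substring of e->tag and
 * sets the corresponding e->record flag; the call fails (and logs an
 * error) if no token matches any tag.
 */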

static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
				      struct perf_mem_event *e)
{
	char sysfs_name[100];
	char path[PATH_MAX];
	struct stat st;

	scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
	return !stat(path, &st);
}
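
/*
 * Example of the sysfs probe above: for the generic "cpu" PMU and the
 * load event, the path checked is
 *
 *	/sys/devices/cpu/events/mem-loads
 *
 * The generic table's sysfs_name has no %s, so pmu->name is unused
 * here; arch-specific tables may embed a %s that scnprintf() fills
 * with the PMU name.
 */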

int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		struct perf_pmu *pmu = NULL;

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will remain false.
		 */
		if (!e->tag)
			continue;

		/*
		 * Scan all PMUs, not just core ones, since perf mem/c2c on
		 * platforms like AMD uses the IBS OP PMU, which is
		 * independent of the core PMU.
		 */
		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
			e->supported |= perf_mem_event__supported(mnt, pmu, e);
			if (e->supported) {
				found = true;
				break;
			}
		}
	}

	return found ? 0 : -ENOENT;
}
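
/*
 * Typical call sequence (a sketch of how the perf tools use this
 * file): perf_mem_events__init() probes sysfs once at tool startup;
 * only events whose probe succeeded are later accepted by
 * perf_mem_events__record_args().
 */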

void perf_mem_events__list(void)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
			e->supported ? ": available\n" : "");
	}
}

static void perf_mem_events__print_unsupported_hybrid(struct perf_mem_event *e,
						      int idx)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!perf_mem_event__supported(mnt, pmu, e)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}

int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	const char *mnt = sysfs__mount();
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (perf_pmus__num_mem_pmus() == 1) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			struct perf_pmu *pmu = NULL;

			if (!e->supported) {
				perf_mem_events__print_unsupported_hybrid(e, j);
				return -1;
			}

			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
				const char *s = perf_mem_events__name(j, pmu->name);

				if (!perf_mem_event__supported(mnt, pmu, e))
					continue;

				if (s) {
					char *copy = strdup(s);

					if (!copy)
						return -1;

					rec_argv[i++] = "-e";
					rec_argv[i++] = copy;
					rec_tmp[k++] = copy;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}
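
/*
 * Example result (single mem PMU, both events selected): rec_argv
 * gains the pairs
 *
 *	-e cpu/mem-loads,ldlat=30/P -e cpu/mem-stores/P
 *
 * On hybrid systems the inner loop instead emits one -e per
 * supporting PMU, with the PMU name substituted into the event
 * string; those strdup()ed strings are handed back via rec_tmp so
 * the caller can free them.
 */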

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}
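
/*
 * Example (synthetic data_src): mem_dtlb = PERF_MEM_TLB_L1 |
 * PERF_MEM_TLB_HIT prints "L1 hit"; with both the L1 and L2 bits set
 * it would print "L1 or L2".
 */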

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be prepended to represent
	 * 'Remote core, same node' accesses, since the remote field
	 * needs to be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = mem_info->data_src;

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, "%s", mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}
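
/*
 * Example (synthetic data_src): mem_lvl_num = PERF_MEM_LVLNUM_L3 with
 * mem_lvl = PERF_MEM_LVL_HIT prints "L3 hit" (L3 has no mem_lvlnum[]
 * entry, so the "L%d" fallback is used); with mem_remote set and
 * mem_hops = 1 it would print "Remote core, same node L3 hit".
 */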

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info->data_src.mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}
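
/*
 * Example: mem_snoop = PERF_MEM_SNOOP_HITM prints "HitM"; the
 * extended mem_snoopx bits append "Fwd" or "Peer" with the same
 * " or " joining.
 */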

int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}
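
/*
 * Example output (illustrative, for an L1-hit load with no
 * snoop/lock/blocking info):
 *
 *	|OP LOAD|LVL L1 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK  N/A
 */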

int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report an unknown remote level via this bit;
	 * consider it when evaluating remote HITMs.
	 *
	 * On POWER, the remote field can also denote cache accesses
	 * from another core on the same node. Hence, set mrem only
	 * when the remote field is set and HOPS is zero.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO)) stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1)) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3)) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			    mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		    mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if (lvl & P(LVL, MISS))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1)) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}
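
/*
 * Decoding sketch (synthetic sample): a load with
 * PERF_MEM_LVL_HIT | PERF_MEM_LVL_REM_CCE1 and PERF_MEM_SNOOP_HITM
 * bumps stats->load, stats->rmt_hitm and stats->tot_hitm; the same
 * sample also needs valid daddr/iaddr maps, or it is counted in
 * stats->nomap and rejected with -1.
 */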

void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks += add->locks;
	stats->store += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs += add->st_noadrs;
	stats->st_l1hit += add->st_l1hit;
	stats->st_l1miss += add->st_l1miss;
	stats->st_na += add->st_na;
	stats->load += add->load;
	stats->ld_excl += add->ld_excl;
	stats->ld_shared += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io += add->ld_io;
	stats->ld_miss += add->ld_miss;
	stats->ld_noadrs += add->ld_noadrs;
	stats->ld_fbhit += add->ld_fbhit;
	stats->ld_l1hit += add->ld_l1hit;
	stats->ld_l2hit += add->ld_l2hit;
	stats->ld_llchit += add->ld_llchit;
	stats->lcl_hitm += add->lcl_hitm;
	stats->rmt_hitm += add->rmt_hitm;
	stats->tot_hitm += add->tot_hitm;
	stats->lcl_peer += add->lcl_peer;
	stats->rmt_peer += add->rmt_peer;
	stats->tot_peer += add->tot_peer;
	stats->rmt_hit += add->rmt_hit;
	stats->lcl_dram += add->lcl_dram;
	stats->rmt_dram += add->rmt_dram;
	stats->blk_data += add->blk_data;
	stats->blk_addr += add->blk_addr;
	stats->nomap += add->nomap;
	stats->noparse += add->noparse;
}