// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmu-hybrid.h"

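/*
 * Default load-latency threshold, in cycles, substituted into the
 * "ldlat=%u" template of the load event below. "perf mem record"
 * can override it (the --ldlat option).
 */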
unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E
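
/*
 * With the default ldlat of 30, the entries above expand to the event
 * strings "cpu/mem-loads,ldlat=30/P" and "cpu/mem-stores/P" for the
 * generic "cpu" PMU.
 */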

static char mem_loads_name[100];
static bool mem_loads_name__init;

struct perf_mem_event * __weak perf_mem_events__ptr(int i)
{
	if (i >= PERF_MEM_EVENTS__MAX)
		return NULL;

	return &perf_mem_events[i];
}

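/*
 * Format the event string for event @i. For the load event, the
 * "ldlat=%u" template is expanded once and cached in mem_loads_name.
 * This helper and perf_mem_events__ptr() above are __weak so that arch
 * code (e.g. x86) can substitute its own event table and naming.
 */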
char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
	struct perf_mem_event *e = perf_mem_events__ptr(i);

	if (!e)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD) {
		if (!mem_loads_name__init) {
			mem_loads_name__init = true;
			scnprintf(mem_loads_name, sizeof(mem_loads_name),
				  e->name, perf_mem_events__loads_ldlat);
		}
		return mem_loads_name;
	}

	return (char *)e->name;
}

__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
{
	return false;
}

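/*
 * Parse the comma-separated event list given to "perf mem record -e"
 * (e.g. "ldlat-loads,ldlat-stores") and mark every matching table
 * entry for recording.
 */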
int perf_mem_events__parse(const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_mem_events__ptr(j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

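/*
 * An event is considered supported if its sysfs alias exists, i.e.
 * stat() succeeds on e.g. "/sys/devices/cpu/events/mem-loads".
 */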
static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
{
	char path[PATH_MAX];
	struct stat st;

	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
	return !stat(path, &st);
}

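/*
 * Probe which mem events the running kernel actually exposes. On a
 * hybrid system each hybrid PMU (e.g. cpu_core/cpu_atom) is checked,
 * and an event counts as supported if any of them provides it.
 */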
int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		struct perf_pmu *pmu;
		char sysfs_name[100];

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will stay false.
		 */
		if (!e->tag)
			continue;

		if (!perf_pmu__has_hybrid()) {
			scnprintf(sysfs_name, sizeof(sysfs_name),
				  e->sysfs_name, "cpu");
			e->supported = perf_mem_event__supported(mnt, sysfs_name);
		} else {
			perf_pmu__for_each_hybrid_pmu(pmu) {
				scnprintf(sysfs_name, sizeof(sysfs_name),
					  e->sysfs_name, pmu->name);
				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
			}
		}

		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

void perf_mem_events__list(void)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
			e->supported ? ": available\n" : "");
	}
}

static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
						    int idx)
{
	const char *mnt = sysfs__mount();
	char sysfs_name[100];
	struct perf_pmu *pmu;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
			  pmu->name);
		if (!perf_mem_event__supported(mnt, sysfs_name)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}

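/*
 * Append "-e <event>" pairs to the perf record command line for every
 * event marked by perf_mem_events__parse(). On hybrid, one event is
 * emitted per hybrid PMU, and the strdup'ed names are handed back via
 * rec_tmp so the caller can free them later.
 */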
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;
	struct perf_pmu *pmu;
	char *s;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (!perf_pmu__has_hybrid()) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			if (!e->supported) {
				perf_mem_events__print_unsupport_hybrid(e, j);
				return -1;
			}

			perf_pmu__for_each_hybrid_pmu(pmu) {
				rec_argv[i++] = "-e";
				s = perf_mem_events__name(j, pmu->name);
				if (s) {
					s = strdup(s);
					if (!s)
						return -1;

					rec_argv[i++] = s;
					rec_tmp[k++] = s;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be prepended to represent
	 * 'Remote core, same node' accesses, as the remote field
	 * needs to be set together with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;
	int printed = 0;

	if (mem_info)
		m = mem_info->data_src.mem_lvl;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	if (mem_info && mem_info->data_src.mem_remote) {
		strcat(out, "Remote ");
		l += 7;
	}

	/*
	 * If the mem_hops field is set, we can skip printing the data
	 * source via the PERF_MEM_LVL namespace.
	 */
	if (mem_info && mem_info->data_src.mem_hops) {
		l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]);
	} else {
		for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
			if (!(m & 0x1))
				continue;
			if (printed++) {
				strcat(out, " or ");
				l += 4;
			}
			l += scnprintf(out + l, sz - l, mem_lvl[i]);
		}
	}

	if (mem_info && mem_info->data_src.mem_lvl_num) {
		int lvl = mem_info->data_src.mem_lvl_num;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);
	}

	if (l == 0)
		l += scnprintf(out + l, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info->data_src.mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

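/*
 * Decode one sample's data source into perf c2c statistics: classify
 * loads/stores by level and snoop result, and count HITM ("hit in a
 * modified cache line") and peer accesses locally vs. remotely.
 */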
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also be used to denote cache
	 * accesses from another core of the same node. Hence, set mrem
	 * only when HOPS is zero and the remote field is set.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO)) stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
	return err;
}

void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks += add->locks;
	stats->store += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs += add->st_noadrs;
	stats->st_l1hit += add->st_l1hit;
	stats->st_l1miss += add->st_l1miss;
	stats->st_na += add->st_na;
	stats->load += add->load;
	stats->ld_excl += add->ld_excl;
	stats->ld_shared += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io += add->ld_io;
	stats->ld_miss += add->ld_miss;
	stats->ld_noadrs += add->ld_noadrs;
	stats->ld_fbhit += add->ld_fbhit;
	stats->ld_l1hit += add->ld_l1hit;
	stats->ld_l2hit += add->ld_l2hit;
	stats->ld_llchit += add->ld_llchit;
	stats->lcl_hitm += add->lcl_hitm;
	stats->rmt_hitm += add->rmt_hitm;
	stats->tot_hitm += add->tot_hitm;
	stats->lcl_peer += add->lcl_peer;
	stats->rmt_peer += add->rmt_peer;
	stats->tot_peer += add->tot_peer;
	stats->rmt_hit += add->rmt_hit;
	stats->lcl_dram += add->lcl_dram;
	stats->rmt_dram += add->rmt_dram;
	stats->blk_data += add->blk_data;
	stats->blk_addr += add->blk_addr;
	stats->nomap += add->nomap;
	stats->noparse += add->noparse;
}
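
/*
 * A second, later revision of mem-events.c follows, from after the
 * rework that moved the mem_events tables into struct perf_pmu.
 */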
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "cpumap.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "mem-info.h"
#include "debug.h"
#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
	E(NULL,			NULL,				NULL,		false,	0),
};
#undef E

bool perf_mem_record[PERF_MEM_EVENTS__MAX] = { 0 };

static char mem_loads_name[100];
static char mem_stores_name[100];

struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

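/* Iterate the PMU list, returning only PMUs that carry a mem_events table. */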
static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * The current perf mem doesn't support per-PMU configuration.
	 * The exact same configuration is applied to all the PMUs
	 * that support mem_events.
	 * Return the first such PMU.
	 *
	 * Note: the only case which may support multiple mem_events
	 * capable PMUs is Intel hybrid. The exact same mem_events are
	 * shared among the PMUs, so configuring only the first PMU is
	 * good enough as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs, starting from the given pmu
 * @pmu: Start pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

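/*
 * Build the event string for event @i on @pmu. The e->name template is
 * expanded with the PMU name (and ldlat where e->ldlat is set); PMUs
 * with an aux_event (e.g. Intel with mem-loads-aux) substitute the PMU
 * name twice. The result lives in a static buffer, valid until the
 * next call.
 */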
static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	e = &pmu->mem_events[i];
	if (!e || !e->name)
		return NULL;

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else
				return NULL;
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}

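/*
 * On PMUs where loads need an auxiliary leader event (e->aux_event,
 * e.g. Intel's mem-loads-aux), report whether @leader is that event.
 */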
bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}

int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				perf_mem_record[j] = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

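/*
 * Events without an event_name (some arch tables leave it unset) are
 * assumed supported; otherwise check that the sysfs alias exists under
 * /sys/devices/<pmu>/events/.
 */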
static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
					   struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}

static int __perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will stay false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

int perf_pmu__mem_events_init(void)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		if (__perf_pmu__mem_events_init(pmu))
			return -ENOENT;
	}

	return 0;
}

void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}

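/*
 * Append "-e <event>" pairs for every recorded event on every mem
 * capable PMU, and warn when the merged PMU cpumasks cover only a
 * subset of the online CPUs (e.g. mem events on a hybrid system).
 */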
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;
	struct perf_cpu_map *cpu_map = NULL;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!perf_mem_record[j])
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;

			cpu_map = perf_cpu_map__merge(cpu_map, pmu->cpus);
		}
	}

	if (cpu_map) {
		if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
			char buf[200];

			cpu_map__snprint(cpu_map, buf, sizeof(buf));
			pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
		}
		perf_cpu_map__put(cpu_map);
	}

	*argv_nr = i;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_L1] = "L1",
	[PERF_MEM_LVLNUM_L2] = "L2",
	[PERF_MEM_LVLNUM_L3] = "L3",
	[PERF_MEM_LVLNUM_L4] = "L4",
	[PERF_MEM_LVLNUM_L2_MHB] = "L2 MHB",
	[PERF_MEM_LVLNUM_MSC] = "Memory-side Cache",
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be prepended to represent
	 * 'Remote core, same node' accesses, as the remote field
	 * needs to be set together with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info__const_data_src(mem_info)->mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

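/*
 * Print the data-source level. Prefer the newer PERF_MEM_LVLNUM
 * namespace (mem_lvl_num) with its hop/remote decoration; fall back
 * to the legacy PERF_MEM_LVL bits only when mem_lvl_num is unset.
 */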
int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = *mem_info__const_data_src(mem_info);

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "Unknown level %d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info__const_data_src(mem_info)->mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info__const_data_src(mem_info)->mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = mem_info__data_src(mi);
	u64 daddr = mem_info__daddr(mi)->addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also be used to denote cache
	 * accesses from another core of the same node. Hence, set mrem
	 * only when HOPS is zero and the remote field is set.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO)) stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
	return err;
}

void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries += add->nr_entries;

	stats->locks += add->locks;
	stats->store += add->store;
	stats->st_uncache += add->st_uncache;
	stats->st_noadrs += add->st_noadrs;
	stats->st_l1hit += add->st_l1hit;
	stats->st_l1miss += add->st_l1miss;
	stats->st_na += add->st_na;
	stats->load += add->load;
	stats->ld_excl += add->ld_excl;
	stats->ld_shared += add->ld_shared;
	stats->ld_uncache += add->ld_uncache;
	stats->ld_io += add->ld_io;
	stats->ld_miss += add->ld_miss;
	stats->ld_noadrs += add->ld_noadrs;
	stats->ld_fbhit += add->ld_fbhit;
	stats->ld_l1hit += add->ld_l1hit;
	stats->ld_l2hit += add->ld_l2hit;
	stats->ld_llchit += add->ld_llchit;
	stats->lcl_hitm += add->lcl_hitm;
	stats->rmt_hitm += add->rmt_hitm;
	stats->tot_hitm += add->tot_hitm;
	stats->lcl_peer += add->lcl_peer;
	stats->rmt_peer += add->rmt_peer;
	stats->tot_peer += add->tot_peer;
	stats->rmt_hit += add->rmt_hit;
	stats->lcl_dram += add->lcl_dram;
	stats->rmt_dram += add->rmt_dram;
	stats->blk_data += add->blk_data;
	stats->blk_addr += add->blk_addr;
	stats->nomap += add->nomap;
	stats->noparse += add->noparse;
}