v6.2: tools/perf/util/mem-events.c
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stddef.h>
  3#include <stdlib.h>
  4#include <string.h>
  5#include <errno.h>
  6#include <sys/types.h>
  7#include <sys/stat.h>
  8#include <unistd.h>
  9#include <api/fs/fs.h>
 10#include <linux/kernel.h>
 11#include "map_symbol.h"
 12#include "mem-events.h"
 13#include "debug.h"
 14#include "symbol.h"
 15#include "pmu.h"
 16#include "pmu-hybrid.h"
 17
 18unsigned int perf_mem_events__loads_ldlat = 30;
 19
 20#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
 21
 22static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
 23	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
 24	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
 25	E(NULL,			NULL,				NULL),
 26};
 27#undef E
 28
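The two entries above are plain printf-style templates: the "ldlat-loads" event name takes the load-latency threshold (perf_mem_events__loads_ldlat, default 30), and the sysfs_name column locates each event under the cpu PMU. A minimal standalone sketch, not perf code, of how the load template expands (the buffer size mirrors mem_loads_name below):

#include <stdio.h>

int main(void)
{
	char name[100];			/* same size as mem_loads_name */
	unsigned int ldlat = 30;	/* perf_mem_events__loads_ldlat default */

	/* expands the "ldlat-loads" template to "cpu/mem-loads,ldlat=30/P" */
	snprintf(name, sizeof(name), "cpu/mem-loads,ldlat=%u/P", ldlat);
	puts(name);
	return 0;
}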
 29static char mem_loads_name[100];
 30static bool mem_loads_name__init;
 31
 32struct perf_mem_event * __weak perf_mem_events__ptr(int i)
 33{
 34	if (i >= PERF_MEM_EVENTS__MAX)
 35		return NULL;
 36
 37	return &perf_mem_events[i];
 38}
 39
 40char * __weak perf_mem_events__name(int i, char *pmu_name  __maybe_unused)
 41{
 42	struct perf_mem_event *e = perf_mem_events__ptr(i);
 43
 44	if (!e)
 45		return NULL;
 46
 47	if (i == PERF_MEM_EVENTS__LOAD) {
 48		if (!mem_loads_name__init) {
 49			mem_loads_name__init = true;
 50			scnprintf(mem_loads_name, sizeof(mem_loads_name),
 51				  e->name, perf_mem_events__loads_ldlat);
 52		}
 53		return mem_loads_name;
 54	}
 55
 56	return (char *)e->name;
 57}
 58
 59__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
 60{
 61	return false;
 62}
 63
 64int perf_mem_events__parse(const char *str)
 65{
 66	char *tok, *saveptr = NULL;
 67	bool found = false;
 68	char *buf;
 69	int j;
 70
 71	/* We need a buffer that we know we can write to. */
 72	buf = malloc(strlen(str) + 1);
 73	if (!buf)
 74		return -ENOMEM;
 75
 76	strcpy(buf, str);
 77
 78	tok = strtok_r((char *)buf, ",", &saveptr);
 79
 80	while (tok) {
 81		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
 82			struct perf_mem_event *e = perf_mem_events__ptr(j);
 83
 84			if (!e->tag)
 85				continue;
 86
 87			if (strstr(e->tag, tok))
 88				e->record = found = true;
 89		}
 90
 91		tok = strtok_r(NULL, ",", &saveptr);
 92	}
 93
 94	free(buf);
 95
 96	if (found)
 97		return 0;
 98
 99	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
100	return -1;
101}
102
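perf_mem_events__parse() splits the user string on commas and marks every event whose tag contains the token; because the substring match runs against the tag, "loads" alone selects "ldlat-loads". A standalone sketch of that matching, assuming the two tags defined above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char * const tags[] = { "ldlat-loads", "ldlat-stores" };

int main(void)
{
	char *buf = strdup("loads");	/* writable copy, as in the parser */
	char *saveptr = NULL;

	for (char *tok = strtok_r(buf, ",", &saveptr); tok;
	     tok = strtok_r(NULL, ",", &saveptr)) {
		for (size_t j = 0; j < 2; j++) {
			if (strstr(tags[j], tok))
				printf("record: %s\n", tags[j]);	/* ldlat-loads */
		}
	}
	free(buf);
	return 0;
}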
103static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
104{
105	char path[PATH_MAX];
106	struct stat st;
107
108	scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
109	return !stat(path, &st);
110}
111
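perf_mem_event__supported() reduces the support check to a stat() of the event's sysfs entry. Assuming sysfs is mounted at /sys and the non-hybrid sysfs_name "cpu/events/mem-loads" built below, the probe is equivalent to this standalone sketch:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	const char *path = "/sys/devices/cpu/events/mem-loads";

	printf("%s: %s\n", path, stat(path, &st) ? "unsupported" : "supported");
	return 0;
}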
112int perf_mem_events__init(void)
113{
114	const char *mnt = sysfs__mount();
115	bool found = false;
116	int j;
117
118	if (!mnt)
119		return -ENOENT;
120
121	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
122		struct perf_mem_event *e = perf_mem_events__ptr(j);
123		struct perf_pmu *pmu;
124		char sysfs_name[100];
125
126		/*
127		 * If the event entry isn't valid, skip initialization
128		 * and "e->supported" stays false.
129		 */
130		if (!e->tag)
131			continue;
132
133		if (!perf_pmu__has_hybrid()) {
134			scnprintf(sysfs_name, sizeof(sysfs_name),
135				  e->sysfs_name, "cpu");
136			e->supported = perf_mem_event__supported(mnt, sysfs_name);
137		} else {
138			perf_pmu__for_each_hybrid_pmu(pmu) {
139				scnprintf(sysfs_name, sizeof(sysfs_name),
140					  e->sysfs_name, pmu->name);
141				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
142			}
143		}
144
145		if (e->supported)
146			found = true;
147	}
148
149	return found ? 0 : -ENOENT;
150}
151
152void perf_mem_events__list(void)
153{
154	int j;
155
156	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
157		struct perf_mem_event *e = perf_mem_events__ptr(j);
158
159		fprintf(stderr, "%-*s%-*s%s",
160			e->tag ? 13 : 0,
161			e->tag ? : "",
162			e->tag && verbose > 0 ? 25 : 0,
163			e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "",
164			e->supported ? ": available\n" : "");
165	}
166}
167
168static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
169						    int idx)
170{
171	const char *mnt = sysfs__mount();
172	char sysfs_name[100];
173	struct perf_pmu *pmu;
174
175	perf_pmu__for_each_hybrid_pmu(pmu) {
176		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
177			  pmu->name);
178		if (!perf_mem_event__supported(mnt, sysfs_name)) {
179			pr_err("failed: event '%s' not supported\n",
180			       perf_mem_events__name(idx, pmu->name));
181		}
182	}
183}
184
185int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
186				 char **rec_tmp, int *tmp_nr)
187{
188	int i = *argv_nr, k = 0;
189	struct perf_mem_event *e;
190	struct perf_pmu *pmu;
191	char *s;
192
193	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
194		e = perf_mem_events__ptr(j);
195		if (!e->record)
196			continue;
197
198		if (!perf_pmu__has_hybrid()) {
199			if (!e->supported) {
200				pr_err("failed: event '%s' not supported\n",
201				       perf_mem_events__name(j, NULL));
202				return -1;
203			}
204
205			rec_argv[i++] = "-e";
206			rec_argv[i++] = perf_mem_events__name(j, NULL);
207		} else {
208			if (!e->supported) {
209				perf_mem_events__print_unsupport_hybrid(e, j);
210				return -1;
211			}
212
213			perf_pmu__for_each_hybrid_pmu(pmu) {
214				rec_argv[i++] = "-e";
215				s = perf_mem_events__name(j, pmu->name);
216				if (s) {
217					s = strdup(s);
218					if (!s)
219						return -1;
220
221					rec_argv[i++] = s;
222					rec_tmp[k++] = s;
223				}
224			}
225		}
226	}
227
228	*argv_nr = i;
229	*tmp_nr = k;
230	return 0;
231}
232
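On a non-hybrid system with both events requested and supported, the loop above appends one "-e" option per event, so the perf record argv gains a tail like this (illustrative, with the default ldlat of 30):

const char *rec_argv_tail[] = {
	"-e", "cpu/mem-loads,ldlat=30/P",
	"-e", "cpu/mem-stores/P",
};

On hybrid systems the perf_pmu__for_each_hybrid_pmu() branch emits one such pair per hybrid PMU instead, with each string strdup()ed into rec_tmp so the caller can free it later.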
233static const char * const tlb_access[] = {
234	"N/A",
235	"HIT",
236	"MISS",
237	"L1",
238	"L2",
239	"Walker",
240	"Fault",
241};
242
243int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
244{
245	size_t l = 0, i;
246	u64 m = PERF_MEM_TLB_NA;
247	u64 hit, miss;
248
249	sz -= 1; /* -1 for null termination */
250	out[0] = '\0';
251
252	if (mem_info)
253		m = mem_info->data_src.mem_dtlb;
254
255	hit = m & PERF_MEM_TLB_HIT;
256	miss = m & PERF_MEM_TLB_MISS;
257
258	/* already taken care of */
259	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
260
261	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
262		if (!(m & 0x1))
263			continue;
264		if (l) {
265			strcat(out, " or ");
266			l += 4;
267		}
268		l += scnprintf(out + l, sz - l, tlb_access[i]);
269	}
270	if (*out == '\0')
271		l += scnprintf(out, sz - l, "N/A");
272	if (hit)
273		l += scnprintf(out + l, sz - l, " hit");
274	if (miss)
275		l += scnprintf(out + l, sz - l, " miss");
276
277	return l;
278}
279
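The decoder strips the HIT/MISS bits first, walks the remaining bits against tlb_access[], then appends " hit" or " miss". A runnable sketch of the same bit walk, assuming the UAPI definitions from <linux/perf_event.h>:

#include <stdio.h>
#include <linux/perf_event.h>

static const char * const tlb_access[] = {
	"N/A", "HIT", "MISS", "L1", "L2", "Walker", "Fault",
};

int main(void)
{
	unsigned long long m = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
	unsigned long long hit = m & PERF_MEM_TLB_HIT;

	m &= ~(PERF_MEM_TLB_HIT | PERF_MEM_TLB_MISS);	/* already handled */
	for (unsigned int i = 0; m && i < 7; i++, m >>= 1)
		if (m & 0x1)
			printf("%s", tlb_access[i]);
	if (hit)
		printf(" hit");
	putchar('\n');		/* prints "L1 hit" */
	return 0;
}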
280static const char * const mem_lvl[] = {
281	"N/A",
282	"HIT",
283	"MISS",
284	"L1",
285	"LFB/MAB",
286	"L2",
287	"L3",
288	"Local RAM",
289	"Remote RAM (1 hop)",
290	"Remote RAM (2 hops)",
291	"Remote Cache (1 hop)",
292	"Remote Cache (2 hops)",
293	"I/O",
294	"Uncached",
295};
296
297static const char * const mem_lvlnum[] = {
298	[PERF_MEM_LVLNUM_CXL] = "CXL",
299	[PERF_MEM_LVLNUM_IO] = "I/O",
300	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
301	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
302	[PERF_MEM_LVLNUM_RAM] = "RAM",
303	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
304	[PERF_MEM_LVLNUM_NA] = "N/A",
305};
306
307static const char * const mem_hops[] = {
308	"N/A",
309	/*
310	 * While printing, 'Remote' is prepended to represent
311	 * 'Remote core, same node' accesses, since the remote field
312	 * must be set along with the mem_hops field.
313	 */
314	"core, same node",
315	"node, same socket",
316	"socket, same board",
317	"board",
318};
319
320static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
321{
322	u64 op = PERF_MEM_LOCK_NA;
323	int l;
324
325	if (mem_info)
326		op = mem_info->data_src.mem_op;
327
328	if (op & PERF_MEM_OP_NA)
329		l = scnprintf(out, sz, "N/A");
330	else if (op & PERF_MEM_OP_LOAD)
331		l = scnprintf(out, sz, "LOAD");
332	else if (op & PERF_MEM_OP_STORE)
333		l = scnprintf(out, sz, "STORE");
334	else if (op & PERF_MEM_OP_PFETCH)
335		l = scnprintf(out, sz, "PFETCH");
336	else if (op & PERF_MEM_OP_EXEC)
337		l = scnprintf(out, sz, "EXEC");
338	else
339		l = scnprintf(out, sz, "No");
340
341	return l;
342}
343
344int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
345{
346	size_t i, l = 0;
347	u64 m =  PERF_MEM_LVL_NA;
348	u64 hit, miss;
349	int printed = 0;
350
351	if (mem_info)
352		m  = mem_info->data_src.mem_lvl;
353
354	sz -= 1; /* -1 for null termination */
355	out[0] = '\0';
356
357	hit = m & PERF_MEM_LVL_HIT;
358	miss = m & PERF_MEM_LVL_MISS;
359
360	/* already taken care of */
361	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
362
363	if (mem_info && mem_info->data_src.mem_remote) {
364		strcat(out, "Remote ");
365		l += 7;
366	}
367
368	/*
369	 * In case the mem_hops field is set, we can skip printing the data
370	 * source via the PERF_MEM_LVL namespace.
371	 */
372	if (mem_info && mem_info->data_src.mem_hops) {
373		l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]);
374	} else {
375		for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
376			if (!(m & 0x1))
377				continue;
378			if (printed++) {
379				strcat(out, " or ");
380				l += 4;
381			}
382			l += scnprintf(out + l, sz - l, mem_lvl[i]);
383		}
384	}
385
386	if (mem_info && mem_info->data_src.mem_lvl_num) {
387		int lvl = mem_info->data_src.mem_lvl_num;
388		if (printed++) {
389			strcat(out, " or ");
390			l += 4;
391		}
392		if (mem_lvlnum[lvl])
393			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
394		else
395			l += scnprintf(out + l, sz - l, "L%d", lvl);
396	}
397
398	if (l == 0)
399		l += scnprintf(out + l, sz - l, "N/A");
400	if (hit)
401		l += scnprintf(out + l, sz - l, " hit");
402	if (miss)
403		l += scnprintf(out + l, sz - l, " miss");
404
405	return l;
406}
407
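To see how the branches combine: with mem_remote set, mem_hops indexing "node, same socket" and mem_lvl_num = 3, the function prints "Remote node, same socket L3" and then the hit/miss suffix from the legacy bits. A sketch of composing such a data_src from the UAPI bit definitions (assumes <linux/perf_event.h>):

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
	union perf_mem_data_src ds = { .val = 0 };

	ds.mem_remote = 1;			/* prepends "Remote " */
	ds.mem_hops = PERF_MEM_HOPS_1;		/* "node, same socket " */
	ds.mem_lvl_num = PERF_MEM_LVLNUM_L3;	/* no mem_lvlnum[] name -> "L3" */
	ds.mem_lvl = PERF_MEM_LVL_HIT;		/* appends " hit" */

	printf("expected: Remote node, same socket L3 hit (data_src %#llx)\n",
	       (unsigned long long)ds.val);
	return 0;
}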
408static const char * const snoop_access[] = {
409	"N/A",
410	"None",
411	"Hit",
412	"Miss",
413	"HitM",
414};
415
416static const char * const snoopx_access[] = {
417	"Fwd",
418	"Peer",
419};
420
421int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
422{
423	size_t i, l = 0;
424	u64 m = PERF_MEM_SNOOP_NA;
425
426	sz -= 1; /* -1 for null termination */
427	out[0] = '\0';
428
429	if (mem_info)
430		m = mem_info->data_src.mem_snoop;
431
432	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
433		if (!(m & 0x1))
434			continue;
435		if (l) {
436			strcat(out, " or ");
437			l += 4;
438		}
439		l += scnprintf(out + l, sz - l, snoop_access[i]);
440	}
441
442	m = 0;
443	if (mem_info)
444		m = mem_info->data_src.mem_snoopx;
445
446	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
447		if (!(m & 0x1))
448			continue;
449
450		if (l) {
451			strcat(out, " or ");
452			l += 4;
453		}
454		l += scnprintf(out + l, sz - l, snoopx_access[i]);
455	}
456
457	if (*out == '\0')
458		l += scnprintf(out, sz - l, "N/A");
459
460	return l;
461}
462
463int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
464{
465	u64 mask = PERF_MEM_LOCK_NA;
466	int l;
467
468	if (mem_info)
469		mask = mem_info->data_src.mem_lock;
470
471	if (mask & PERF_MEM_LOCK_NA)
472		l = scnprintf(out, sz, "N/A");
473	else if (mask & PERF_MEM_LOCK_LOCKED)
474		l = scnprintf(out, sz, "Yes");
475	else
476		l = scnprintf(out, sz, "No");
477
478	return l;
479}
480
481int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
482{
483	size_t l = 0;
484	u64 mask = PERF_MEM_BLK_NA;
485
486	sz -= 1; /* -1 for null termination */
487	out[0] = '\0';
488
489	if (mem_info)
490		mask = mem_info->data_src.mem_blk;
491
492	if (!mask || (mask & PERF_MEM_BLK_NA)) {
493		l += scnprintf(out + l, sz - l, " N/A");
494		return l;
495	}
496	if (mask & PERF_MEM_BLK_DATA)
497		l += scnprintf(out + l, sz - l, " Data");
498	if (mask & PERF_MEM_BLK_ADDR)
499		l += scnprintf(out + l, sz - l, " Addr");
500
501	return l;
502}
503
504int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
505{
506	int i = 0;
507
508	i += scnprintf(out, sz, "|OP ");
509	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
510	i += scnprintf(out + i, sz - i, "|LVL ");
511	i += perf_mem__lvl_scnprintf(out + i, sz, mem_info);
512	i += scnprintf(out + i, sz - i, "|SNP ");
513	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
514	i += scnprintf(out + i, sz - i, "|TLB ");
515	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
516	i += scnprintf(out + i, sz - i, "|LCK ");
517	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
518	i += scnprintf(out + i, sz - i, "|BLK ");
519	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);
520
521	return i;
522}
523
524int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
525{
526	union perf_mem_data_src *data_src = &mi->data_src;
527	u64 daddr  = mi->daddr.addr;
528	u64 op     = data_src->mem_op;
529	u64 lvl    = data_src->mem_lvl;
530	u64 snoop  = data_src->mem_snoop;
531	u64 snoopx = data_src->mem_snoopx;
532	u64 lock   = data_src->mem_lock;
533	u64 blk    = data_src->mem_blk;
534	/*
535	 * Skylake might report unknown remote level via this
536	 * bit, consider it when evaluating remote HITMs.
537	 *
538	 * In case of POWER, the remote field can also denote cache
539	 * accesses from another core of the same node. Hence, set
540	 * mrem only when HOPS is zero and the remote field is set.
541	 */
542	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
543	int err = 0;
544
545#define HITM_INC(__f)		\
546do {				\
547	stats->__f++;		\
548	stats->tot_hitm++;	\
549} while (0)
550
551#define PEER_INC(__f)		\
552do {				\
553	stats->__f++;		\
554	stats->tot_peer++;	\
555} while (0)
556
557#define P(a, b) PERF_MEM_##a##_##b
558
559	stats->nr_entries++;
560
561	if (lock & P(LOCK, LOCKED)) stats->locks++;
562
563	if (blk & P(BLK, DATA)) stats->blk_data++;
564	if (blk & P(BLK, ADDR)) stats->blk_addr++;
565
566	if (op & P(OP, LOAD)) {
567		/* load */
568		stats->load++;
569
570		if (!daddr) {
571			stats->ld_noadrs++;
572			return -1;
573		}
574
575		if (lvl & P(LVL, HIT)) {
576			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
577			if (lvl & P(LVL, IO))  stats->ld_io++;
578			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
579			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
580			if (lvl & P(LVL, L2)) {
581				stats->ld_l2hit++;
582
583				if (snoopx & P(SNOOPX, PEER))
584					PEER_INC(lcl_peer);
585			}
586			if (lvl & P(LVL, L3 )) {
587				if (snoop & P(SNOOP, HITM))
588					HITM_INC(lcl_hitm);
589				else
590					stats->ld_llchit++;
591
592				if (snoopx & P(SNOOPX, PEER))
593					PEER_INC(lcl_peer);
594			}
595
596			if (lvl & P(LVL, LOC_RAM)) {
597				stats->lcl_dram++;
598				if (snoop & P(SNOOP, HIT))
599					stats->ld_shared++;
600				else
601					stats->ld_excl++;
602			}
603
604			if ((lvl & P(LVL, REM_RAM1)) ||
605			    (lvl & P(LVL, REM_RAM2)) ||
606			     mrem) {
607				stats->rmt_dram++;
608				if (snoop & P(SNOOP, HIT))
609					stats->ld_shared++;
610				else
611					stats->ld_excl++;
612			}
613		}
614
615		if ((lvl & P(LVL, REM_CCE1)) ||
616		    (lvl & P(LVL, REM_CCE2)) ||
617		     mrem) {
618			if (snoop & P(SNOOP, HIT)) {
619				stats->rmt_hit++;
620			} else if (snoop & P(SNOOP, HITM)) {
621				HITM_INC(rmt_hitm);
622			} else if (snoopx & P(SNOOPX, PEER)) {
623				stats->rmt_hit++;
624				PEER_INC(rmt_peer);
625			}
626		}
627
628		if ((lvl & P(LVL, MISS)))
629			stats->ld_miss++;
630
631	} else if (op & P(OP, STORE)) {
632		/* store */
633		stats->store++;
634
635		if (!daddr) {
636			stats->st_noadrs++;
637			return -1;
638		}
639
640		if (lvl & P(LVL, HIT)) {
641			if (lvl & P(LVL, UNC)) stats->st_uncache++;
642			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
643		}
644		if (lvl & P(LVL, MISS))
645			if (lvl & P(LVL, L1)) stats->st_l1miss++;
646		if (lvl & P(LVL, NA))
647			stats->st_na++;
648	} else {
649		/* unparsable data_src? */
650		stats->noparse++;
651		return -1;
652	}
653
654	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
655		stats->nomap++;
656		return -1;
657	}
658
659#undef P
660#undef HITM_INC
661	return err;
662}
663
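The decoder is driven entirely by data_src bit tests. For example, a load serviced by a remote cache one hop away whose snoop hit a modified line (the false-sharing signature perf c2c looks for) carries PERF_MEM_OP_LOAD, PERF_MEM_LVL_REM_CCE1 and PERF_MEM_SNOOP_HITM, and is counted in rmt_hitm and tot_hitm above. A sketch of composing that value from the UAPI bits:

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
	union perf_mem_data_src ds = { .val = 0 };

	ds.mem_op = PERF_MEM_OP_LOAD;
	ds.mem_lvl = PERF_MEM_LVL_REM_CCE1;	/* remote cache, 1 hop */
	ds.mem_snoop = PERF_MEM_SNOOP_HITM;	/* snoop hit a modified line */

	/* given a valid daddr, c2c_decode_stats() counts this as rmt_hitm/tot_hitm */
	printf("data_src = %#llx\n", (unsigned long long)ds.val);
	return 0;
}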
664void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
665{
666	stats->nr_entries	+= add->nr_entries;
667
668	stats->locks		+= add->locks;
669	stats->store		+= add->store;
670	stats->st_uncache	+= add->st_uncache;
671	stats->st_noadrs	+= add->st_noadrs;
672	stats->st_l1hit		+= add->st_l1hit;
673	stats->st_l1miss	+= add->st_l1miss;
674	stats->st_na		+= add->st_na;
675	stats->load		+= add->load;
676	stats->ld_excl		+= add->ld_excl;
677	stats->ld_shared	+= add->ld_shared;
678	stats->ld_uncache	+= add->ld_uncache;
679	stats->ld_io		+= add->ld_io;
680	stats->ld_miss		+= add->ld_miss;
681	stats->ld_noadrs	+= add->ld_noadrs;
682	stats->ld_fbhit		+= add->ld_fbhit;
683	stats->ld_l1hit		+= add->ld_l1hit;
684	stats->ld_l2hit		+= add->ld_l2hit;
685	stats->ld_llchit	+= add->ld_llchit;
686	stats->lcl_hitm		+= add->lcl_hitm;
687	stats->rmt_hitm		+= add->rmt_hitm;
688	stats->tot_hitm		+= add->tot_hitm;
689	stats->lcl_peer		+= add->lcl_peer;
690	stats->rmt_peer		+= add->rmt_peer;
691	stats->tot_peer		+= add->tot_peer;
692	stats->rmt_hit		+= add->rmt_hit;
693	stats->lcl_dram		+= add->lcl_dram;
694	stats->rmt_dram		+= add->rmt_dram;
695	stats->blk_data		+= add->blk_data;
696	stats->blk_addr		+= add->blk_addr;
697	stats->nomap		+= add->nomap;
698	stats->noparse		+= add->noparse;
699}
v6.9.4: tools/perf/util/mem-events.c
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stddef.h>
  3#include <stdlib.h>
  4#include <string.h>
  5#include <errno.h>
  6#include <sys/types.h>
  7#include <sys/stat.h>
  8#include <unistd.h>
  9#include <api/fs/fs.h>
 10#include <linux/kernel.h>
 11#include "map_symbol.h"
 12#include "mem-events.h"
 13#include "debug.h"
 14#include "symbol.h"
 15#include "pmu.h"
 16#include "pmus.h"
 17
 18unsigned int perf_mem_events__loads_ldlat = 30;
 19
 20#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }
 21
 22struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
 23	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
 24	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
 25	E(NULL,			NULL,				NULL,		false,	0),
 26};
 27#undef E
 28
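Compared with v6.2, the name templates now take the PMU name as their first argument, so one table serves cpu, cpu_core, cpu_atom and friends. A standalone sketch of the substitution for a hybrid p-core PMU (the PMU name is illustrative):

#include <stdio.h>

int main(void)
{
	char name[100];

	/* "%s/mem-loads,ldlat=%u/P" with pmu->name and the ldlat default */
	snprintf(name, sizeof(name), "%s/mem-loads,ldlat=%u/P", "cpu_core", 30U);
	puts(name);		/* cpu_core/mem-loads,ldlat=30/P */
	return 0;
}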
 29static char mem_loads_name[100];
 30static char mem_stores_name[100];
 31
 32struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
 33{
 34	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
 35		return NULL;
 36
 37	return &pmu->mem_events[i];
 38}
 39
 40static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
 41{
 42	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
 43		if (pmu->mem_events)
 44			return pmu;
 45	}
 46	return NULL;
 47}
 48
 49struct perf_pmu *perf_mem_events_find_pmu(void)
 50{
 51	/*
 52	 * The current perf mem doesn't support per-PMU configuration.
 53	 * The exact same configuration is applied to all the
 54	 * PMUs that support mem_events.
 55	 * Return the first such PMU.
 56	 *
 57	 * Note: the only case which may have multiple mem_events-capable
 58	 * PMUs is Intel hybrid, and the exact same mem_events are shared
 59	 * among those PMUs, so configuring only the first PMU is good
 60	 * enough.
 61	 */
 62	return perf_pmus__scan_mem(NULL);
 63}
 64
 65/**
 66 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs starting from the given pmu
 67 * @pmu: Starting pmu. If it's NULL, search the entire PMU list.
 68 */
 69int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
 70{
 71	int num = 0;
 72
 73	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
 74		num++;
 75
 76	return num;
 77}
 78
 79static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
 80{
 81	struct perf_mem_event *e;
 82
 83	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
 84		return NULL;
 85
 86	e = &pmu->mem_events[i];
 87	if (!e)
 88		return NULL;
 89
 90	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
 91		if (e->ldlat) {
 92			if (!e->aux_event) {
 93				/* ARM and most of Intel */
 94				scnprintf(mem_loads_name, sizeof(mem_loads_name),
 95					  e->name, pmu->name,
 96					  perf_mem_events__loads_ldlat);
 97			} else {
 98				/* Intel with mem-loads-aux event */
 99				scnprintf(mem_loads_name, sizeof(mem_loads_name),
100					  e->name, pmu->name, pmu->name,
101					  perf_mem_events__loads_ldlat);
102			}
103		} else {
104			if (!e->aux_event) {
105				/* AMD and POWER */
106				scnprintf(mem_loads_name, sizeof(mem_loads_name),
107					  e->name, pmu->name);
108			} else
109				return NULL;
110		}
111
112		return mem_loads_name;
113	}
114
115	if (i == PERF_MEM_EVENTS__STORE) {
116		scnprintf(mem_stores_name, sizeof(mem_stores_name),
117			  e->name, pmu->name);
118		return mem_stores_name;
119	}
120
121	return NULL;
122}
123
124bool is_mem_loads_aux_event(struct evsel *leader)
125{
126	struct perf_pmu *pmu = leader->pmu;
127	struct perf_mem_event *e;
128
129	if (!pmu || !pmu->mem_events)
130		return false;
131
132	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
133	if (!e->aux_event)
134		return false;
135
136	return leader->core.attr.config == e->aux_event;
137}
138
139int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
140{
141	char *tok, *saveptr = NULL;
142	bool found = false;
143	char *buf;
144	int j;
145
146	/* We need a buffer that we know we can write to. */
147	buf = malloc(strlen(str) + 1);
148	if (!buf)
149		return -ENOMEM;
150
151	strcpy(buf, str);
152
153	tok = strtok_r((char *)buf, ",", &saveptr);
154
155	while (tok) {
156		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
157			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
158
159			if (!e->tag)
160				continue;
161
162			if (strstr(e->tag, tok))
163				e->record = found = true;
164		}
165
166		tok = strtok_r(NULL, ",", &saveptr);
167	}
168
169	free(buf);
170
171	if (found)
172		return 0;
173
174	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
175	return -1;
176}
177
178static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
179				      struct perf_mem_event *e)
180{
181	char path[PATH_MAX];
182	struct stat st;
183
184	if (!e->event_name)
185		return true;
186
187	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);
188
189	return !stat(path, &st);
190}
191
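The probe now builds the path from the PMU name and the bare event_name, and an event without an event_name is assumed supported. A sketch of the resulting path, assuming sysfs at /sys and an illustrative hybrid PMU:

#include <stdio.h>

int main(void)
{
	char path[4096];	/* PATH_MAX-sized in the real code */

	snprintf(path, sizeof(path), "%s/devices/%s/events/%s",
		 "/sys", "cpu_core", "mem-loads");
	puts(path);		/* /sys/devices/cpu_core/events/mem-loads */
	return 0;
}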
192int perf_pmu__mem_events_init(struct perf_pmu *pmu)
193{
194	const char *mnt = sysfs__mount();
195	bool found = false;
196	int j;
197
198	if (!mnt)
199		return -ENOENT;
200
201	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
202		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
203
204		/*
205		 * If the event entry isn't valid, skip initialization
206		 * and "e->supported" stays false.
207		 */
208		if (!e->tag)
209			continue;
210
211		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
212		if (e->supported)
213			found = true;
214	}
215
216	return found ? 0 : -ENOENT;
217}
218
219void perf_pmu__mem_events_list(struct perf_pmu *pmu)
220{
221	int j;
222
223	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
224		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
225
226		fprintf(stderr, "%-*s%-*s%s",
227			e->tag ? 13 : 0,
228			e->tag ? : "",
229			e->tag && verbose > 0 ? 25 : 0,
230			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
231			e->supported ? ": available\n" : "");
232	}
233}
234
235int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
236{
237	const char *mnt = sysfs__mount();
238	struct perf_pmu *pmu = NULL;
239	struct perf_mem_event *e;
240	int i = *argv_nr;
241	const char *s;
242	char *copy;
243
244	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
245		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
246			e = perf_pmu__mem_events_ptr(pmu, j);
247
248			if (!e->record)
249				continue;
250
251			if (!e->supported) {
252				pr_err("failed: event '%s' not supported\n",
253					perf_pmu__mem_events_name(j, pmu));
254				return -1;
255			}
256
257			s = perf_pmu__mem_events_name(j, pmu);
258			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
259				continue;
260
261			copy = strdup(s);
262			if (!copy)
263				return -1;
264
265			rec_argv[i++] = "-e";
266			rec_argv[i++] = copy;
267		}
268	}
269
270	*argv_nr = i;
271	return 0;
272}
273
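With per-PMU mem_events, the perf_pmus__scan_mem() loop emits one "-e" pair per (PMU, event) combination, so recording loads and stores on a hybrid machine yields an argv tail like this (PMU names illustrative, default ldlat):

const char *rec_argv_tail[] = {
	"-e", "cpu_core/mem-loads,ldlat=30/P",
	"-e", "cpu_core/mem-stores/P",
	"-e", "cpu_atom/mem-loads,ldlat=30/P",
	"-e", "cpu_atom/mem-stores/P",
};

Unlike the v6.2 version, the strdup()ed strings are no longer tracked in a separate rec_tmp array; the copies live in rec_argv itself.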
274static const char * const tlb_access[] = {
275	"N/A",
276	"HIT",
277	"MISS",
278	"L1",
279	"L2",
280	"Walker",
281	"Fault",
282};
283
284int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
285{
286	size_t l = 0, i;
287	u64 m = PERF_MEM_TLB_NA;
288	u64 hit, miss;
289
290	sz -= 1; /* -1 for null termination */
291	out[0] = '\0';
292
293	if (mem_info)
294		m = mem_info->data_src.mem_dtlb;
295
296	hit = m & PERF_MEM_TLB_HIT;
297	miss = m & PERF_MEM_TLB_MISS;
298
299	/* already taken care of */
300	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
301
302	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
303		if (!(m & 0x1))
304			continue;
305		if (l) {
306			strcat(out, " or ");
307			l += 4;
308		}
309		l += scnprintf(out + l, sz - l, tlb_access[i]);
310	}
311	if (*out == '\0')
312		l += scnprintf(out, sz - l, "N/A");
313	if (hit)
314		l += scnprintf(out + l, sz - l, " hit");
315	if (miss)
316		l += scnprintf(out + l, sz - l, " miss");
317
318	return l;
319}
320
321static const char * const mem_lvl[] = {
322	"N/A",
323	"HIT",
324	"MISS",
325	"L1",
326	"LFB/MAB",
327	"L2",
328	"L3",
329	"Local RAM",
330	"Remote RAM (1 hop)",
331	"Remote RAM (2 hops)",
332	"Remote Cache (1 hop)",
333	"Remote Cache (2 hops)",
334	"I/O",
335	"Uncached",
336};
337
338static const char * const mem_lvlnum[] = {
339	[PERF_MEM_LVLNUM_UNC] = "Uncached",
340	[PERF_MEM_LVLNUM_CXL] = "CXL",
341	[PERF_MEM_LVLNUM_IO] = "I/O",
342	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
343	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
344	[PERF_MEM_LVLNUM_RAM] = "RAM",
345	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
346	[PERF_MEM_LVLNUM_NA] = "N/A",
347};
348
349static const char * const mem_hops[] = {
350	"N/A",
351	/*
352	 * While printing, 'Remote' is prepended to represent
353	 * 'Remote core, same node' accesses, since the remote field
354	 * must be set along with the mem_hops field.
355	 */
356	"core, same node",
357	"node, same socket",
358	"socket, same board",
359	"board",
360};
361
362static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
363{
364	u64 op = PERF_MEM_LOCK_NA;
365	int l;
366
367	if (mem_info)
368		op = mem_info->data_src.mem_op;
369
370	if (op & PERF_MEM_OP_NA)
371		l = scnprintf(out, sz, "N/A");
372	else if (op & PERF_MEM_OP_LOAD)
373		l = scnprintf(out, sz, "LOAD");
374	else if (op & PERF_MEM_OP_STORE)
375		l = scnprintf(out, sz, "STORE");
376	else if (op & PERF_MEM_OP_PFETCH)
377		l = scnprintf(out, sz, "PFETCH");
378	else if (op & PERF_MEM_OP_EXEC)
379		l = scnprintf(out, sz, "EXEC");
380	else
381		l = scnprintf(out, sz, "No");
382
383	return l;
384}
385
386int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
387{
388	union perf_mem_data_src data_src;
389	int printed = 0;
390	size_t l = 0;
391	size_t i;
392	int lvl;
393	char hit_miss[5] = {0};
394
395	sz -= 1; /* -1 for null termination */
396	out[0] = '\0';
397
398	if (!mem_info)
399		goto na;
400
401	data_src = mem_info->data_src;
402
403	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
404		memcpy(hit_miss, "hit", 3);
405	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
406		memcpy(hit_miss, "miss", 4);
407
408	lvl = data_src.mem_lvl_num;
409	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
410		if (data_src.mem_remote) {
411			strcat(out, "Remote ");
412			l += 7;
413		}
414
415		if (data_src.mem_hops)
416			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);
417
418		if (mem_lvlnum[lvl])
419			l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
420		else
421			l += scnprintf(out + l, sz - l, "L%d", lvl);
422
423		l += scnprintf(out + l, sz - l, " %s", hit_miss);
424		return l;
425	}
426
427	lvl = data_src.mem_lvl;
428	if (!lvl)
429		goto na;
430
431	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
432	if (!lvl)
433		goto na;
434
435	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
436		if (!(lvl & 0x1))
437			continue;
438		if (printed++) {
439			strcat(out, " or ");
440			l += 4;
441		}
442		l += scnprintf(out + l, sz - l, mem_lvl[i]);
443	}
444
445	if (printed) {
446		l += scnprintf(out + l, sz - l, " %s", hit_miss);
447		return l;
448	}
449
450na:
451	strcat(out, "N/A");
452	return 3;
453}
454
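Note the structural change from v6.2: the printer now trusts mem_lvl_num first and falls back to the legacy PERF_MEM_LVL bits only when no level number was set, and the hit/miss suffix is captured once up front. The same sample as in the v6.2 example (mem_remote = 1, mem_hops = PERF_MEM_HOPS_1, mem_lvl_num = PERF_MEM_LVLNUM_L3, mem_lvl = PERF_MEM_LVL_HIT) still prints "Remote node, same socket L3 hit".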
455static const char * const snoop_access[] = {
456	"N/A",
457	"None",
458	"Hit",
459	"Miss",
460	"HitM",
461};
462
463static const char * const snoopx_access[] = {
464	"Fwd",
465	"Peer",
466};
467
468int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
469{
470	size_t i, l = 0;
471	u64 m = PERF_MEM_SNOOP_NA;
472
473	sz -= 1; /* -1 for null termination */
474	out[0] = '\0';
475
476	if (mem_info)
477		m = mem_info->data_src.mem_snoop;
478
479	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
480		if (!(m & 0x1))
481			continue;
482		if (l) {
483			strcat(out, " or ");
484			l += 4;
485		}
486		l += scnprintf(out + l, sz - l, snoop_access[i]);
487	}
488
489	m = 0;
490	if (mem_info)
491		m = mem_info->data_src.mem_snoopx;
492
493	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
494		if (!(m & 0x1))
495			continue;
496
497		if (l) {
498			strcat(out, " or ");
499			l += 4;
500		}
501		l += scnprintf(out + l, sz - l, snoopx_access[i]);
502	}
503
504	if (*out == '\0')
505		l += scnprintf(out, sz - l, "N/A");
506
507	return l;
508}
509
510int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
511{
512	u64 mask = PERF_MEM_LOCK_NA;
513	int l;
514
515	if (mem_info)
516		mask = mem_info->data_src.mem_lock;
517
518	if (mask & PERF_MEM_LOCK_NA)
519		l = scnprintf(out, sz, "N/A");
520	else if (mask & PERF_MEM_LOCK_LOCKED)
521		l = scnprintf(out, sz, "Yes");
522	else
523		l = scnprintf(out, sz, "No");
524
525	return l;
526}
527
528int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
529{
530	size_t l = 0;
531	u64 mask = PERF_MEM_BLK_NA;
532
533	sz -= 1; /* -1 for null termination */
534	out[0] = '\0';
535
536	if (mem_info)
537		mask = mem_info->data_src.mem_blk;
538
539	if (!mask || (mask & PERF_MEM_BLK_NA)) {
540		l += scnprintf(out + l, sz - l, " N/A");
541		return l;
542	}
543	if (mask & PERF_MEM_BLK_DATA)
544		l += scnprintf(out + l, sz - l, " Data");
545	if (mask & PERF_MEM_BLK_ADDR)
546		l += scnprintf(out + l, sz - l, " Addr");
547
548	return l;
549}
550
551int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
552{
553	int i = 0;
554
555	i += scnprintf(out, sz, "|OP ");
556	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
557	i += scnprintf(out + i, sz - i, "|LVL ");
558	i += perf_mem__lvl_scnprintf(out + i, sz, mem_info);
559	i += scnprintf(out + i, sz - i, "|SNP ");
560	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
561	i += scnprintf(out + i, sz - i, "|TLB ");
562	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
563	i += scnprintf(out + i, sz - i, "|LCK ");
564	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
565	i += scnprintf(out + i, sz - i, "|BLK ");
566	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);
567
568	return i;
569}
570
571int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
572{
573	union perf_mem_data_src *data_src = &mi->data_src;
574	u64 daddr  = mi->daddr.addr;
575	u64 op     = data_src->mem_op;
576	u64 lvl    = data_src->mem_lvl;
577	u64 snoop  = data_src->mem_snoop;
578	u64 snoopx = data_src->mem_snoopx;
579	u64 lock   = data_src->mem_lock;
580	u64 blk    = data_src->mem_blk;
581	/*
582	 * Skylake might report unknown remote level via this
583	 * bit, consider it when evaluating remote HITMs.
584	 *
585	 * In case of POWER, the remote field can also denote cache
586	 * accesses from another core of the same node. Hence, set
587	 * mrem only when HOPS is zero and the remote field is set.
588	 */
589	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
590	int err = 0;
591
592#define HITM_INC(__f)		\
593do {				\
594	stats->__f++;		\
595	stats->tot_hitm++;	\
596} while (0)
597
598#define PEER_INC(__f)		\
599do {				\
600	stats->__f++;		\
601	stats->tot_peer++;	\
602} while (0)
603
604#define P(a, b) PERF_MEM_##a##_##b
605
606	stats->nr_entries++;
607
608	if (lock & P(LOCK, LOCKED)) stats->locks++;
609
610	if (blk & P(BLK, DATA)) stats->blk_data++;
611	if (blk & P(BLK, ADDR)) stats->blk_addr++;
612
613	if (op & P(OP, LOAD)) {
614		/* load */
615		stats->load++;
616
617		if (!daddr) {
618			stats->ld_noadrs++;
619			return -1;
620		}
621
622		if (lvl & P(LVL, HIT)) {
623			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
624			if (lvl & P(LVL, IO))  stats->ld_io++;
625			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
626			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
627			if (lvl & P(LVL, L2)) {
628				stats->ld_l2hit++;
629
630				if (snoopx & P(SNOOPX, PEER))
631					PEER_INC(lcl_peer);
632			}
633			if (lvl & P(LVL, L3 )) {
634				if (snoop & P(SNOOP, HITM))
635					HITM_INC(lcl_hitm);
636				else
637					stats->ld_llchit++;
638
639				if (snoopx & P(SNOOPX, PEER))
640					PEER_INC(lcl_peer);
641			}
642
643			if (lvl & P(LVL, LOC_RAM)) {
644				stats->lcl_dram++;
645				if (snoop & P(SNOOP, HIT))
646					stats->ld_shared++;
647				else
648					stats->ld_excl++;
649			}
650
651			if ((lvl & P(LVL, REM_RAM1)) ||
652			    (lvl & P(LVL, REM_RAM2)) ||
653			     mrem) {
654				stats->rmt_dram++;
655				if (snoop & P(SNOOP, HIT))
656					stats->ld_shared++;
657				else
658					stats->ld_excl++;
659			}
660		}
661
662		if ((lvl & P(LVL, REM_CCE1)) ||
663		    (lvl & P(LVL, REM_CCE2)) ||
664		     mrem) {
665			if (snoop & P(SNOOP, HIT)) {
666				stats->rmt_hit++;
667			} else if (snoop & P(SNOOP, HITM)) {
668				HITM_INC(rmt_hitm);
669			} else if (snoopx & P(SNOOPX, PEER)) {
670				stats->rmt_hit++;
671				PEER_INC(rmt_peer);
672			}
673		}
674
675		if ((lvl & P(LVL, MISS)))
676			stats->ld_miss++;
677
678	} else if (op & P(OP, STORE)) {
679		/* store */
680		stats->store++;
681
682		if (!daddr) {
683			stats->st_noadrs++;
684			return -1;
685		}
686
687		if (lvl & P(LVL, HIT)) {
688			if (lvl & P(LVL, UNC)) stats->st_uncache++;
689			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
690		}
691		if (lvl & P(LVL, MISS))
692			if (lvl & P(LVL, L1)) stats->st_l1miss++;
693		if (lvl & P(LVL, NA))
694			stats->st_na++;
695	} else {
696		/* unparsable data_src? */
697		stats->noparse++;
698		return -1;
699	}
700
701	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
702		stats->nomap++;
703		return -1;
704	}
705
706#undef P
707#undef HITM_INC
708	return err;
709}
710
711void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
712{
713	stats->nr_entries	+= add->nr_entries;
714
715	stats->locks		+= add->locks;
716	stats->store		+= add->store;
717	stats->st_uncache	+= add->st_uncache;
718	stats->st_noadrs	+= add->st_noadrs;
719	stats->st_l1hit		+= add->st_l1hit;
720	stats->st_l1miss	+= add->st_l1miss;
721	stats->st_na		+= add->st_na;
722	stats->load		+= add->load;
723	stats->ld_excl		+= add->ld_excl;
724	stats->ld_shared	+= add->ld_shared;
725	stats->ld_uncache	+= add->ld_uncache;
726	stats->ld_io		+= add->ld_io;
727	stats->ld_miss		+= add->ld_miss;
728	stats->ld_noadrs	+= add->ld_noadrs;
729	stats->ld_fbhit		+= add->ld_fbhit;
730	stats->ld_l1hit		+= add->ld_l1hit;
731	stats->ld_l2hit		+= add->ld_l2hit;
732	stats->ld_llchit	+= add->ld_llchit;
733	stats->lcl_hitm		+= add->lcl_hitm;
734	stats->rmt_hitm		+= add->rmt_hitm;
735	stats->tot_hitm		+= add->tot_hitm;
736	stats->lcl_peer		+= add->lcl_peer;
737	stats->rmt_peer		+= add->rmt_peer;
738	stats->tot_peer		+= add->tot_peer;
739	stats->rmt_hit		+= add->rmt_hit;
740	stats->lcl_dram		+= add->lcl_dram;
741	stats->rmt_dram		+= add->rmt_dram;
742	stats->blk_data		+= add->blk_data;
743	stats->blk_addr		+= add->blk_addr;
744	stats->nomap		+= add->nomap;
745	stats->noparse		+= add->noparse;
746}