// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <ctype.h>
#include <dirent.h>
#include <pthread.h>
#include <string.h>
#include <unistd.h>
#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "hwmon_pmu.h"
#include "tool_pmu.h"
#include "print-events.h"
#include "strbuf.h"

/*
 * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *             directory contains a "cpus" file. All PMUs belonging to
 *             core_pmus must have pmu->is_core=1. If there is more than one
 *             PMU in this list, perf interprets it as a heterogeneous
 *             platform. (FWIW, certain ARM platforms with heterogeneous cores
 *             use a homogeneous PMU, and thus they are treated as a
 *             homogeneous platform by perf because core_pmus will have only
 *             one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether a PMU is present per SMT thread or
 *             outside of the core in the hardware. For example, an instance
 *             of the AMD ibs_fetch// and ibs_op// PMUs is present in each
 *             hardware SMT thread, yet they are captured under other_pmus.
 *             PMUs belonging to other_pmus must have pmu->is_core=0 but
 *             pmu->is_uncore could be 0 or 1.
 */
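/*
 * For illustration only (a sketch, assuming an Intel hybrid system that
 * exposes "cpu_core" and "cpu_atom" core PMUs plus uncore and software PMUs),
 * the lists would roughly end up as:
 *
 *   core_pmus:  cpu_core, cpu_atom                       (pmu->is_core == 1)
 *   other_pmus: uncore_imc_0, breakpoint, software,      (pmu->is_core == 0)
 *               tool, hwmon_*, ...
 *
 * The exact PMU names depend on the platform and the running kernel.
 */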
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

static void pmu_read_sysfs(bool core_only);

size_t pmu_name_len_no_suffix(const char *str)
{
	int orig_len, len;
	bool has_hex_digits = false;

	orig_len = len = strlen(str);

	/* Count trailing digits. */
	while (len > 0 && isxdigit(str[len - 1])) {
		if (!isdigit(str[len - 1]))
			has_hex_digits = true;
		len--;
	}

	if (len > 0 && len != orig_len && str[len - 1] == '_') {
		/*
		 * There is a '_{num}' suffix. For decimal suffixes any length
		 * will do, for hexadecimal ensure more than 2 hex digits so
		 * that S390's cpum_cf PMU doesn't match.
		 */
		if (!has_hex_digits || (orig_len - len) > 2)
			return len - 1;
	}
	/* Use the full length. */
	return orig_len;
}
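
/*
 * A few worked examples for pmu_name_len_no_suffix() (the PMU names shown are
 * illustrative):
 *
 *   "uncore_cha_2"  -> 10, the length of "uncore_cha" (decimal suffix dropped)
 *   "armv8_pmuv3_0" -> 11, the length of "armv8_pmuv3"
 *   "cpum_cf"       ->  7, the full length ("cf" is only 2 hex digits, so the
 *                      suffix is kept, as required for S390's cpum_cf)
 *   "cpu"           ->  3, the full length (no '_{num}' suffix at all)
 */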

int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
	unsigned long long lhs_num = 0, rhs_num = 0;
	size_t lhs_pmu_name_len = pmu_name_len_no_suffix(lhs_pmu_name);
	size_t rhs_pmu_name_len = pmu_name_len_no_suffix(rhs_pmu_name);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			lhs_pmu_name_len < rhs_pmu_name_len ? lhs_pmu_name_len : rhs_pmu_name_len);

	if (lhs_pmu_name_len != rhs_pmu_name_len || ret != 0 || lhs_pmu_name_len == 0)
		return ret;

	if (lhs_pmu_name_len + 1 < strlen(lhs_pmu_name))
		lhs_num = strtoull(&lhs_pmu_name[lhs_pmu_name_len + 1], NULL, 16);
	if (rhs_pmu_name_len + 1 < strlen(rhs_pmu_name))
		rhs_num = strtoull(&rhs_pmu_name[rhs_pmu_name_len + 1], NULL, 16);

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}
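
/*
 * Example ordering under pmu_name_cmp() (illustrative names): the '_{num}'
 * suffix is compared numerically (parsed as hex), so "uncore_cha_2" sorts
 * before "uncore_cha_10" (0x2 < 0x10), whereas a plain strcmp() would place
 * "uncore_cha_10" first.
 */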

void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
			       /*eager_load=*/false);
	close(dirfd);

	if (!pmu) {
		/*
		 * Looking up an individual PMU failed. This may mean the name
		 * is an alias, so read the PMUs from sysfs and try to find it
		 * again.
		 */
		pmu_read_sysfs(core_pmu);
		pmu = pmu_find(name);
	}
	return pmu;
}

static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, so we avoid reading and
	 * parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
				/*eager_load=*/false);
}

static int pmus_cmp(void *priv __maybe_unused,
		    const struct list_head *lhs, const struct list_head *rhs)
{
	struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
	struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);

	return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
}

/* Add all PMUs found in sysfs to the PMU lists: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	struct perf_pmu *tool_pmu;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (list_empty(&core_pmus)) {
		if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
			pr_err("Failure to set up any core PMUs\n");
	}
	list_sort(NULL, &core_pmus, pmus_cmp);
	if (!core_only) {
		tool_pmu = perf_pmus__tool_pmu();
		list_add_tail(&tool_pmu->list, &other_pmus);
		perf_pmus__read_hwmon_pmus(&other_pmus);
	}
	list_sort(NULL, &other_pmus, pmus_cmp);
	if (!list_empty(&core_pmus)) {
		read_sysfs_core_pmus = true;
		if (!core_only)
			read_sysfs_all_pmus = true;
	}
}

static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}

	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);

	if (pmu || read_sysfs_all_pmus)
		return pmu;

	pmu_read_sysfs(/*core_only=*/false);
	pmu = __perf_pmus__find_by_type(type);
	return pmu;
}

/*
 * PMU iterator: if pmu is NULL, start at the beginning, otherwise return the
 * next PMU. Returns NULL at the end.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}
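
/*
 * Typical use of the iterator, as seen elsewhere in this file: start with a
 * NULL cursor and keep calling perf_pmus__scan() until it returns NULL. Core
 * PMUs are walked first, then the other PMUs:
 *
 *	struct perf_pmu *pmu = NULL;
 *
 *	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
 *		... use pmu ...
 *	}
 */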

struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}

static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;
	int last_pmu_name_len = 0;
	const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	} else
		last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list) {
			int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

			if (last_pmu_name_len == pmu_name_len &&
			    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
				continue;

			return pmu;
		}
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list) {
		int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");

		if (last_pmu_name_len == pmu_name_len &&
		    !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
			continue;

		return pmu;
	}
	return NULL;
}
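
/*
 * Because the lists are sorted with pmu_name_cmp(), PMUs that differ only in
 * a '_{num}' suffix are adjacent, so the skip-duplicates iterator returns
 * only the first of each group. For example (illustrative names), out of
 * "uncore_imc_0", "uncore_imc_1" and "uncore_imc_2" only "uncore_imc_0" is
 * returned.
 */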

const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	const char *name;
	const char *alias;
	const char *scale_unit;
	const char *desc;
	const char *long_desc;
	const char *encoding_desc;
	const char *topic;
	const char *pmu_name;
	const char *event_type_desc;
	bool deprecated;
};

static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	bool a_iscpu, b_iscpu;
	int ret;

	/* Put extra events last. */
	if (!!as->desc != !!bs->desc)
		return !!as->desc - !!bs->desc;

	/* Order by topics. */
	ret = strcmp(as->topic ?: "", bs->topic ?: "");
	if (ret)
		return ret;

	/* Order CPU core events to be first */
	a_iscpu = as->pmu ? as->pmu->is_core : true;
	b_iscpu = bs->pmu ? bs->pmu->is_core : true;
	if (a_iscpu != b_iscpu)
		return a_iscpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(as->name, bs->name);
}

static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
{
	/* Different names -> never duplicates */
	if (strcmp(a->name ?: "//", b->name ?: "//"))
		return false;

	/* Don't remove duplicates for different PMUs */
	return strcmp(a->pmu_name, b->pmu_name) == 0;
}

struct events_callback_state {
	struct sevent *aliases;
	size_t aliases_len;
	size_t index;
};

static int perf_pmus__print_pmu_events__callback(void *vstate,
						struct pmu_event_info *info)
{
	struct events_callback_state *state = vstate;
	struct sevent *s;

	if (state->index >= state->aliases_len) {
		pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
		return 1;
	}
	assert(info->pmu != NULL || info->name != NULL);
	s = &state->aliases[state->index];
	s->pmu = info->pmu;
#define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
	COPY_STR(name);
	COPY_STR(alias);
	COPY_STR(scale_unit);
	COPY_STR(desc);
	COPY_STR(long_desc);
	COPY_STR(encoding_desc);
	COPY_STR(topic);
	COPY_STR(pmu_name);
	COPY_STR(event_type_desc);
#undef COPY_STR
	s->deprecated = info->deprecated;
	state->index++;
	return 0;
}

void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	int printed = 0;
	int len;
	struct sevent *aliases;
	struct events_callback_state state;
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	pmu = NULL;
	len = 0;
	while ((pmu = scan_fn(pmu)) != NULL)
		len += perf_pmu__num_events(pmu);

	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	state = (struct events_callback_state) {
		.aliases = aliases,
		.aliases_len = len,
		.index = 0,
	};
	while ((pmu = scan_fn(pmu)) != NULL) {
		perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
					 perf_pmus__print_pmu_events__callback);
	}
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (int j = 0; j < len; j++) {
		/* Skip duplicates */
		if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
			goto free;

		print_cb->print_event(print_state,
				aliases[j].topic,
				aliases[j].pmu_name,
				aliases[j].name,
				aliases[j].alias,
				aliases[j].scale_unit,
				aliases[j].deprecated,
				aliases[j].event_type_desc,
				aliases[j].desc,
				aliases[j].long_desc,
				aliases[j].encoding_desc);
free:
		zfree(&aliases[j].name);
		zfree(&aliases[j].alias);
		zfree(&aliases[j].scale_unit);
		zfree(&aliases[j].desc);
		zfree(&aliases[j].long_desc);
		zfree(&aliases[j].encoding_desc);
		zfree(&aliases[j].topic);
		zfree(&aliases[j].pmu_name);
		zfree(&aliases[j].event_type_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

struct build_format_string_args {
	struct strbuf short_string;
	struct strbuf long_string;
	int num_formats;
};

static int build_format_string(void *state, const char *name, int config,
			       const unsigned long *bits)
{
	struct build_format_string_args *args = state;
	unsigned int num_bits;
	int ret1, ret2 = 0;

	(void)config;
	args->num_formats++;
	if (args->num_formats > 1) {
		strbuf_addch(&args->long_string, ',');
		if (args->num_formats < 4)
			strbuf_addch(&args->short_string, ',');
	}
	num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
	if (num_bits <= 1) {
		ret1 = strbuf_addf(&args->long_string, "%s", name);
		if (args->num_formats < 4)
			ret2 = strbuf_addf(&args->short_string, "%s", name);
	} else if (num_bits > 8) {
		ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
				   ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	} else {
		ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
				  ULLONG_MAX >> (64 - num_bits));
		if (args->num_formats < 4) {
			ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
					   ULLONG_MAX >> (64 - num_bits));
		}
	}
	return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
}
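
/*
 * A rough sketch of the output (field names and widths are illustrative): a
 * core PMU with formats "event" (8 bits), "umask" (8 bits) and "edge"
 * (1 bit) would, once the caller below adds the PMU-name prefix and the
 * "/modifier" suffix, yield a long string like:
 *
 *	cpu/event=0..255,umask=0..255,edge/modifier
 *
 * The short string keeps only the first three formats; when there are more,
 * the caller appends ",.../modifier" instead of "/modifier".
 */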

void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
	struct perf_pmu *(*scan_fn)(struct perf_pmu *);
	struct perf_pmu *pmu = NULL;

	if (skip_duplicate_pmus)
		scan_fn = perf_pmus__scan_skip_duplicates;
	else
		scan_fn = perf_pmus__scan;

	while ((pmu = scan_fn(pmu)) != NULL) {
		struct build_format_string_args format_args = {
			.short_string = STRBUF_INIT,
			.long_string = STRBUF_INIT,
			.num_formats = 0,
		};
		int len = pmu_name_len_no_suffix(pmu->name);
		const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";

		if (!pmu->is_core)
			desc = NULL;

		strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
		strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
		perf_pmu__for_each_format(pmu, &format_args, build_format_string);

		if (format_args.num_formats > 3)
			strbuf_addf(&format_args.short_string, ",.../modifier");
		else
			strbuf_addf(&format_args.short_string, "/modifier");

		strbuf_addf(&format_args.long_string, "/modifier");
		print_cb->print_event(print_state,
				/*topic=*/NULL,
				/*pmu_name=*/NULL,
				format_args.short_string.buf,
				/*event_alias=*/NULL,
				/*scale_unit=*/NULL,
				/*deprecated=*/false,
				"Raw event descriptor",
				desc,
				/*long_desc=*/NULL,
				format_args.long_string.buf);

		strbuf_release(&format_args.short_string);
		strbuf_release(&format_args.long_string);
	}
}

bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}

static bool __perf_pmus__supports_extended_type(void)
{
	struct perf_pmu *pmu = NULL;

	if (perf_pmus__num_core_pmus() <= 1)
		return false;

	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (!is_event_supported(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
			return false;
	}

	return true;
}
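
/*
 * Background on the check above: on heterogeneous (hybrid) systems a generic
 * hardware event such as cycles can be directed at a particular core PMU by
 * OR-ing that PMU's type into the upper bits of attr.config (the
 * PERF_PMU_TYPE_SHIFT encoding). The probe tries such an event for every core
 * PMU; extended types are only reported as supported if the running kernel
 * accepts all of them.
 */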

static bool perf_pmus__do_support_extended_type;

static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}

bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}

char *perf_pmus__default_pmu_name(void)
{
	int fd;
	DIR *dir;
	struct dirent *dent;
	char *result = NULL;

	if (!list_empty(&core_pmus))
		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return strdup("cpu");

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return strdup("cpu");
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (is_pmu_core(dent->d_name)) {
			result = strdup(dent->d_name);
			break;
		}
	}

	closedir(dir);
	return result ?: strdup("cpu");
}

struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}
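
/*
 * Note on the cast above: evsel__find_pmu() takes a const evsel but caches
 * the looked-up PMU back into evsel->pmu, so the const is deliberately cast
 * away to memoize the result and avoid repeating the by-type search.
 */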

struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(NULL);
}

struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
{
	/*
	 * Some PMU functions read from the sysfs mount point, so care is
	 * needed, hence passing the eager_load flag to load things like the
	 * format files.
	 */
	return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
}

struct perf_pmu *perf_pmus__add_test_hwmon_pmu(int hwmon_dir,
					       const char *sysfs_name,
					       const char *name)
{
	return hwmon_pmu__new(&other_pmus, hwmon_dir, sysfs_name, name);
}

struct perf_pmu *perf_pmus__fake_pmu(void)
{
	static struct perf_pmu fake = {
		.name = "fake",
		.type = PERF_PMU_TYPE_FAKE,
		.format = LIST_HEAD_INIT(fake.format),
	};

	return &fake;
}