/*
 * NOTE(review): the lines that preceded this comment were web-page
 * navigation residue from the code browser this file was captured from
 * ("Linux Audio", training-course banner, "Loading...") — not source.
 *
 * ===== tools/perf/util/cputopo.c as of Linux v6.2 =====
 */
  1// SPDX-License-Identifier: GPL-2.0
  2#include <sys/param.h>
  3#include <sys/utsname.h>
  4#include <inttypes.h>
  5#include <stdlib.h>
  6#include <string.h>
  7#include <api/fs/fs.h>
  8#include <linux/zalloc.h>
  9#include <perf/cpumap.h>
 10
 11#include "cputopo.h"
 12#include "cpumap.h"
 13#include "debug.h"
 14#include "env.h"
 15#include "pmu-hybrid.h"
 16
 17#define PACKAGE_CPUS_FMT \
 18	"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
 19#define PACKAGE_CPUS_FMT_OLD \
 20	"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
 21#define DIE_CPUS_FMT \
 22	"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
 23#define CORE_CPUS_FMT \
 24	"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
 25#define CORE_CPUS_FMT_OLD \
 26	"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
 
 
 27#define NODE_ONLINE_FMT \
 28	"%s/devices/system/node/online"
 29#define NODE_MEMINFO_FMT \
 30	"%s/devices/system/node/node%d/meminfo"
 31#define NODE_CPULIST_FMT \
 32	"%s/devices/system/node/node%d/cpulist"
 33
/*
 * Read the package, die and core CPU-sibling lists for @cpu from sysfs
 * and record each list in @tp, deduplicated by exact string comparison
 * against the lists already collected from earlier CPUs.
 *
 * Returns 0 if at least one of the three sections read its list
 * successfully, -1 if none did.  The die section is skipped entirely
 * when @tp was allocated without die support (die_cpus_list == NULL).
 */
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	/* Prefer package_cpus_list; pre-v5.3 kernels call it core_siblings_list. */
	scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
			sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto try_dies;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_dies;

	/* Drop the trailing newline so lists compare equal across CPUs. */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->package_cpus_lists; i++) {
		if (!strcmp(buf, tp->package_cpus_list[i]))
			break;
	}
	if (i == tp->package_cpus_lists) {
		/* New list: hand ownership of buf to tp, then reset so the
		 * next getline() allocates a fresh buffer. */
		tp->package_cpus_list[i] = buf;
		tp->package_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_dies:
	/* die_cpus_list is only allocated when the system has die topology. */
	if (!tp->die_cpus_list)
		goto try_threads;

	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->die_cpus_lists; i++) {
		if (!strcmp(buf, tp->die_cpus_list[i]))
			break;
	}
	if (i == tp->die_cpus_lists) {
		tp->die_cpus_list[i] = buf;
		tp->die_cpus_lists++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	/* Prefer core_cpus_list; older kernels call it thread_siblings_list. */
	scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
			  sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_cpus_lists; i++) {
		if (!strcmp(buf, tp->core_cpus_list[i]))
			break;
	}
	if (i == tp->core_cpus_lists) {
		tp->core_cpus_list[i] = buf;
		tp->core_cpus_lists++;
		buf = NULL;
	}
	ret = 0;
done:
	/* fp here is the core-list stream only; the package and die streams
	 * were closed immediately after their getline() calls above. */
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}
140
141void cpu_topology__delete(struct cpu_topology *tp)
142{
143	u32 i;
144
145	if (!tp)
146		return;
147
148	for (i = 0 ; i < tp->package_cpus_lists; i++)
149		zfree(&tp->package_cpus_list[i]);
150
151	for (i = 0 ; i < tp->die_cpus_lists; i++)
152		zfree(&tp->die_cpus_list[i]);
153
154	for (i = 0 ; i < tp->core_cpus_lists; i++)
155		zfree(&tp->core_cpus_list[i]);
156
157	free(tp);
158}
159
160bool cpu_topology__smt_on(const struct cpu_topology *topology)
161{
162	for (u32 i = 0; i < topology->core_cpus_lists; i++) {
163		const char *cpu_list = topology->core_cpus_list[i];
164
165		/*
166		 * If there is a need to separate siblings in a core then SMT is
167		 * enabled.
168		 */
169		if (strchr(cpu_list, ',') || strchr(cpu_list, '-'))
170			return true;
171	}
172	return false;
173}
174
175bool cpu_topology__core_wide(const struct cpu_topology *topology,
176			     const char *user_requested_cpu_list)
177{
178	struct perf_cpu_map *user_requested_cpus;
179
180	/*
181	 * If user_requested_cpu_list is empty then all CPUs are recorded and so
182	 * core_wide is true.
183	 */
184	if (!user_requested_cpu_list)
185		return true;
186
187	user_requested_cpus = perf_cpu_map__new(user_requested_cpu_list);
188	/* Check that every user requested CPU is the complete set of SMT threads on a core. */
189	for (u32 i = 0; i < topology->core_cpus_lists; i++) {
190		const char *core_cpu_list = topology->core_cpus_list[i];
191		struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
192		struct perf_cpu cpu;
193		int idx;
194		bool has_first, first = true;
195
196		perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
197			if (first) {
198				has_first = perf_cpu_map__has(user_requested_cpus, cpu);
199				first = false;
200			} else {
201				/*
202				 * If the first core CPU is user requested then
203				 * all subsequent CPUs in the core must be user
204				 * requested too. If the first CPU isn't user
205				 * requested then none of the others must be
206				 * too.
207				 */
208				if (perf_cpu_map__has(user_requested_cpus, cpu) != has_first) {
209					perf_cpu_map__put(core_cpus);
210					perf_cpu_map__put(user_requested_cpus);
211					return false;
212				}
213			}
214		}
215		perf_cpu_map__put(core_cpus);
216	}
217	perf_cpu_map__put(user_requested_cpus);
218	return true;
219}
220
221static bool has_die_topology(void)
222{
223	char filename[MAXPATHLEN];
224	struct utsname uts;
225
226	if (uname(&uts) < 0)
227		return false;
228
229	if (strncmp(uts.machine, "x86_64", 6) &&
230	    strncmp(uts.machine, "s390x", 5))
231		return false;
232
233	scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
234		  sysfs__mountpoint(), 0);
235	if (access(filename, F_OK) == -1)
236		return false;
237
238	return true;
239}
240
/*
 * Build a cpu_topology covering all present CPUs.  The struct and its
 * three (two, without die support) char* arrays are carved out of one
 * calloc'd allocation, so a single free() in cpu_topology__delete()
 * releases the frame.  Returns NULL on failure.
 */
struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i, nr_addr;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct perf_cpu_map *map;
	bool has_die = has_die_topology();

	ncpus = cpu__max_present_cpu().cpu;

	/* build online CPU map */
	map = perf_cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* One char* slot per possible CPU, per list kind. */
	sz = nr * sizeof(char *);
	if (has_die)
		nr_addr = 3;
	else
		nr_addr = 2;
	addr = calloc(1, sizeof(*tp) + nr_addr * sz);
	if (!addr)
		goto out_free;

	/* Lay the pointer arrays out immediately behind the struct. */
	tp = addr;
	addr += sizeof(*tp);
	tp->package_cpus_list = addr;
	addr += sz;
	if (has_die) {
		tp->die_cpus_list = addr;
		addr += sz;
	}
	tp->core_cpus_list = addr;

	for (i = 0; i < nr; i++) {
		/* Offline CPUs expose no topology files to read. */
		if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	perf_cpu_map__put(map);
	/* ret is still -1 here if no online CPU was processed at all. */
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}
299
300static int load_numa_node(struct numa_topology_node *node, int nr)
301{
302	char str[MAXPATHLEN];
303	char field[32];
304	char *buf = NULL, *p;
305	size_t len = 0;
306	int ret = -1;
307	FILE *fp;
308	u64 mem;
309
310	node->node = (u32) nr;
311
312	scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
313		  sysfs__mountpoint(), nr);
314	fp = fopen(str, "r");
315	if (!fp)
316		return -1;
317
318	while (getline(&buf, &len, fp) > 0) {
319		/* skip over invalid lines */
320		if (!strchr(buf, ':'))
321			continue;
322		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
323			goto err;
324		if (!strcmp(field, "MemTotal:"))
325			node->mem_total = mem;
326		if (!strcmp(field, "MemFree:"))
327			node->mem_free = mem;
328		if (node->mem_total && node->mem_free)
329			break;
330	}
331
332	fclose(fp);
333	fp = NULL;
334
335	scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
336		  sysfs__mountpoint(), nr);
337
338	fp = fopen(str, "r");
339	if (!fp)
340		return -1;
341
342	if (getline(&buf, &len, fp) <= 0)
343		goto err;
344
345	p = strchr(buf, '\n');
346	if (p)
347		*p = '\0';
348
349	node->cpus = buf;
350	fclose(fp);
351	return 0;
352
353err:
354	free(buf);
355	if (fp)
356		fclose(fp);
357	return ret;
358}
359
360struct numa_topology *numa_topology__new(void)
361{
362	struct perf_cpu_map *node_map = NULL;
363	struct numa_topology *tp = NULL;
364	char path[MAXPATHLEN];
365	char *buf = NULL;
366	size_t len = 0;
367	u32 nr, i;
368	FILE *fp;
369	char *c;
370
371	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
372		  sysfs__mountpoint());
373
374	fp = fopen(path, "r");
375	if (!fp)
376		return NULL;
377
378	if (getline(&buf, &len, fp) <= 0)
379		goto out;
380
381	c = strchr(buf, '\n');
382	if (c)
383		*c = '\0';
384
385	node_map = perf_cpu_map__new(buf);
386	if (!node_map)
387		goto out;
388
389	nr = (u32) perf_cpu_map__nr(node_map);
390
391	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
392	if (!tp)
393		goto out;
394
395	tp->nr = nr;
396
397	for (i = 0; i < nr; i++) {
398		if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
399			numa_topology__delete(tp);
400			tp = NULL;
401			break;
402		}
403	}
404
405out:
406	free(buf);
407	fclose(fp);
408	perf_cpu_map__put(node_map);
409	return tp;
410}
411
412void numa_topology__delete(struct numa_topology *tp)
413{
414	u32 i;
415
416	for (i = 0; i < tp->nr; i++)
417		zfree(&tp->nodes[i].cpus);
418
419	free(tp);
420}
421
422static int load_hybrid_node(struct hybrid_topology_node *node,
423			    struct perf_pmu *pmu)
424{
425	const char *sysfs;
426	char path[PATH_MAX];
427	char *buf = NULL, *p;
428	FILE *fp;
429	size_t len = 0;
430
431	node->pmu_name = strdup(pmu->name);
432	if (!node->pmu_name)
433		return -1;
434
435	sysfs = sysfs__mountpoint();
436	if (!sysfs)
437		goto err;
438
439	snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, pmu->name);
440	fp = fopen(path, "r");
441	if (!fp)
442		goto err;
443
444	if (getline(&buf, &len, fp) <= 0) {
445		fclose(fp);
446		goto err;
447	}
448
449	p = strchr(buf, '\n');
450	if (p)
451		*p = '\0';
452
453	fclose(fp);
454	node->cpus = buf;
455	return 0;
456
457err:
458	zfree(&node->pmu_name);
459	free(buf);
460	return -1;
461}
462
463struct hybrid_topology *hybrid_topology__new(void)
464{
465	struct perf_pmu *pmu;
466	struct hybrid_topology *tp = NULL;
467	u32 nr, i = 0;
468
469	nr = perf_pmu__hybrid_pmu_num();
470	if (nr == 0)
471		return NULL;
472
473	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
474	if (!tp)
475		return NULL;
476
477	tp->nr = nr;
478	perf_pmu__for_each_hybrid_pmu(pmu) {
479		if (load_hybrid_node(&tp->nodes[i], pmu)) {
480			hybrid_topology__delete(tp);
481			return NULL;
482		}
483		i++;
484	}
485
486	return tp;
487}
488
489void hybrid_topology__delete(struct hybrid_topology *tp)
490{
491	u32 i;
492
493	for (i = 0; i < tp->nr; i++) {
494		zfree(&tp->nodes[i].pmu_name);
495		zfree(&tp->nodes[i].cpus);
496	}
497
498	free(tp);
499}
/* ===== tools/perf/util/cputopo.c as of Linux v5.14.15 (older copy) ===== */
  1// SPDX-License-Identifier: GPL-2.0
  2#include <sys/param.h>
  3#include <sys/utsname.h>
  4#include <inttypes.h>
  5#include <stdlib.h>
  6#include <string.h>
  7#include <api/fs/fs.h>
  8#include <linux/zalloc.h>
  9#include <perf/cpumap.h>
 10
 11#include "cputopo.h"
 12#include "cpumap.h"
 13#include "debug.h"
 14#include "env.h"
 15#include "pmu-hybrid.h"
 16
 17#define CORE_SIB_FMT \
 
 
 18	"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
 19#define DIE_SIB_FMT \
 20	"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
 21#define THRD_SIB_FMT \
 
 
 22	"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
 23#define THRD_SIB_FMT_NEW \
 24	"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
 25#define NODE_ONLINE_FMT \
 26	"%s/devices/system/node/online"
 27#define NODE_MEMINFO_FMT \
 28	"%s/devices/system/node/node%d/meminfo"
 29#define NODE_CPULIST_FMT \
 30	"%s/devices/system/node/node%d/cpulist"
 31
/*
 * Read the core, die and thread sibling lists for @cpu from sysfs and
 * record each list in @tp, deduplicated by exact string comparison
 * against the lists already collected from earlier CPUs.
 *
 * Returns 0 if at least one of the three sections read its list
 * successfully, -1 if none did.  The die section is skipped entirely
 * when @tp was allocated without die support (die_siblings == NULL).
 */
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_dies;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_dies;

	/* Drop the trailing newline so lists compare equal across CPUs. */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		/* New list: hand ownership of buf to tp, then reset so the
		 * next getline() allocates a fresh buffer. */
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_dies:
	/* die_siblings is only allocated when the system has die topology. */
	if (!tp->die_siblings)
		goto try_threads;

	scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
		  sysfs__mountpoint(), cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->die_sib; i++) {
		if (!strcmp(buf, tp->die_siblings[i]))
			break;
	}
	if (i == tp->die_sib) {
		tp->die_siblings[i] = buf;
		tp->die_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	/* Prefer core_cpus_list; fall back to the older thread_siblings_list. */
	scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT_NEW,
		  sysfs__mountpoint(), cpu);
	if (access(filename, F_OK) == -1) {
		scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
			  sysfs__mountpoint(), cpu);
	}
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	/* fp here is the thread-list stream only; the core and die streams
	 * were closed immediately after their getline() calls above. */
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}
134
135void cpu_topology__delete(struct cpu_topology *tp)
136{
137	u32 i;
138
139	if (!tp)
140		return;
141
142	for (i = 0 ; i < tp->core_sib; i++)
143		zfree(&tp->core_siblings[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
145	if (tp->die_sib) {
146		for (i = 0 ; i < tp->die_sib; i++)
147			zfree(&tp->die_siblings[i]);
 
 
 
148	}
 
 
149
150	for (i = 0 ; i < tp->thread_sib; i++)
151		zfree(&tp->thread_siblings[i]);
 
 
152
153	free(tp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154}
155
156static bool has_die_topology(void)
157{
158	char filename[MAXPATHLEN];
159	struct utsname uts;
160
161	if (uname(&uts) < 0)
162		return false;
163
164	if (strncmp(uts.machine, "x86_64", 6))
 
165		return false;
166
167	scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
168		  sysfs__mountpoint(), 0);
169	if (access(filename, F_OK) == -1)
170		return false;
171
172	return true;
173}
174
/*
 * Build a cpu_topology covering all present CPUs.  The struct and its
 * three (two, without die support) char* arrays are carved out of one
 * calloc'd allocation, so a single free() in cpu_topology__delete()
 * releases the frame.  Returns NULL on failure.
 */
struct cpu_topology *cpu_topology__new(void)
{
	struct cpu_topology *tp = NULL;
	void *addr;
	u32 nr, i, nr_addr;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct perf_cpu_map *map;
	bool has_die = has_die_topology();

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = perf_cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* One char* slot per possible CPU, per list kind. */
	sz = nr * sizeof(char *);
	if (has_die)
		nr_addr = 3;
	else
		nr_addr = 2;
	addr = calloc(1, sizeof(*tp) + nr_addr * sz);
	if (!addr)
		goto out_free;

	/* Lay the pointer arrays out immediately behind the struct. */
	tp = addr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	if (has_die) {
		tp->die_siblings = addr;
		addr += sz;
	}
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		/* Offline CPUs expose no topology files to read. */
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topology(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	perf_cpu_map__put(map);
	/* ret is still -1 here if no online CPU was processed at all. */
	if (ret) {
		cpu_topology__delete(tp);
		tp = NULL;
	}
	return tp;
}
233
234static int load_numa_node(struct numa_topology_node *node, int nr)
235{
236	char str[MAXPATHLEN];
237	char field[32];
238	char *buf = NULL, *p;
239	size_t len = 0;
240	int ret = -1;
241	FILE *fp;
242	u64 mem;
243
244	node->node = (u32) nr;
245
246	scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
247		  sysfs__mountpoint(), nr);
248	fp = fopen(str, "r");
249	if (!fp)
250		return -1;
251
252	while (getline(&buf, &len, fp) > 0) {
253		/* skip over invalid lines */
254		if (!strchr(buf, ':'))
255			continue;
256		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
257			goto err;
258		if (!strcmp(field, "MemTotal:"))
259			node->mem_total = mem;
260		if (!strcmp(field, "MemFree:"))
261			node->mem_free = mem;
262		if (node->mem_total && node->mem_free)
263			break;
264	}
265
266	fclose(fp);
267	fp = NULL;
268
269	scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
270		  sysfs__mountpoint(), nr);
271
272	fp = fopen(str, "r");
273	if (!fp)
274		return -1;
275
276	if (getline(&buf, &len, fp) <= 0)
277		goto err;
278
279	p = strchr(buf, '\n');
280	if (p)
281		*p = '\0';
282
283	node->cpus = buf;
284	fclose(fp);
285	return 0;
286
287err:
288	free(buf);
289	if (fp)
290		fclose(fp);
291	return ret;
292}
293
/*
 * Build a numa_topology from the sysfs list of online nodes, loading
 * memory sizes and the cpulist of each node.  Returns NULL on failure.
 */
struct numa_topology *numa_topology__new(void)
{
	struct perf_cpu_map *node_map = NULL;
	struct numa_topology *tp = NULL;
	char path[MAXPATHLEN];
	char *buf = NULL;
	size_t len = 0;
	u32 nr, i;
	FILE *fp;
	char *c;

	scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
		  sysfs__mountpoint());

	fp = fopen(path, "r");
	if (!fp)
		return NULL;

	if (getline(&buf, &len, fp) <= 0)
		goto out;

	/* Strip the trailing newline before parsing the node list. */
	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	/* The online-node list shares the CPU-list syntax, so reuse the
	 * cpumap parser for it. */
	node_map = perf_cpu_map__new(buf);
	if (!node_map)
		goto out;

	nr = (u32) node_map->nr;

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
	if (!tp)
		goto out;

	tp->nr = nr;

	for (i = 0; i < nr; i++) {
		/* Any failed node tears the whole topology down. */
		if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
			numa_topology__delete(tp);
			tp = NULL;
			break;
		}
	}

out:
	free(buf);
	fclose(fp);
	perf_cpu_map__put(node_map);
	return tp;
}
345
346void numa_topology__delete(struct numa_topology *tp)
347{
348	u32 i;
349
350	for (i = 0; i < tp->nr; i++)
351		zfree(&tp->nodes[i].cpus);
352
353	free(tp);
354}
355
/*
 * Fill @node with @pmu's name and the CPU list read from the PMU's
 * sysfs "cpus" file.  On success node->pmu_name and node->cpus own
 * heap-allocated strings.  Returns 0 on success, -1 on error (with
 * node->pmu_name zfree'd).
 */
static int load_hybrid_node(struct hybrid_topology_node *node,
			    struct perf_pmu *pmu)
{
	const char *sysfs;
	char path[PATH_MAX];
	char *buf = NULL, *p;
	FILE *fp;
	size_t len = 0;

	node->pmu_name = strdup(pmu->name);
	if (!node->pmu_name)
		return -1;

	sysfs = sysfs__mountpoint();
	if (!sysfs)
		goto err;

	snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, pmu->name);
	fp = fopen(path, "r");
	if (!fp)
		goto err;

	if (getline(&buf, &len, fp) <= 0) {
		/* err does not know about fp, so close it here. */
		fclose(fp);
		goto err;
	}

	/* Strip the trailing newline before storing the list. */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	fclose(fp);
	node->cpus = buf;
	return 0;

err:
	zfree(&node->pmu_name);
	free(buf);
	return -1;
}
396
/*
 * Build a hybrid_topology with one node per hybrid PMU.  Returns NULL
 * when there are no hybrid PMUs or on any allocation/load failure.
 */
struct hybrid_topology *hybrid_topology__new(void)
{
	struct perf_pmu *pmu;
	struct hybrid_topology *tp = NULL;
	u32 nr, i = 0;

	nr = perf_pmu__hybrid_pmu_num();
	if (nr == 0)
		return NULL;

	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
	if (!tp)
		return NULL;

	tp->nr = nr;
	perf_pmu__for_each_hybrid_pmu(pmu) {
		/* Any failed node tears the whole topology down. */
		if (load_hybrid_node(&tp->nodes[i], pmu)) {
			hybrid_topology__delete(tp);
			return NULL;
		}
		i++;
	}

	return tp;
}
422
423void hybrid_topology__delete(struct hybrid_topology *tp)
424{
425	u32 i;
426
427	for (i = 0; i < tp->nr; i++) {
428		zfree(&tp->nodes[i].pmu_name);
429		zfree(&tp->nodes[i].cpus);
430	}
431
432	free(tp);
433}