v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include "util.h"
  3#include <api/fs/fs.h>
  4#include "../perf.h"
  5#include "cpumap.h"
  6#include <assert.h>
  7#include <dirent.h>
  8#include <stdio.h>
  9#include <stdlib.h>
 10#include <linux/bitmap.h>
 11#include "asm/bug.h"
 12
 13#include "sane_ctype.h"
 14
 15static int max_cpu_num;
 16static int max_present_cpu_num;
 17static int max_node_num;
 18static int *cpunode_map;
 19
 20static struct cpu_map *cpu_map__default_new(void)
 21{
 22	struct cpu_map *cpus;
 23	int nr_cpus;
 24
 25	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 26	if (nr_cpus < 0)
 27		return NULL;
 28
 29	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
 30	if (cpus != NULL) {
 31		int i;
 32		for (i = 0; i < nr_cpus; ++i)
 33			cpus->map[i] = i;
 34
 35		cpus->nr = nr_cpus;
 36		refcount_set(&cpus->refcnt, 1);
 37	}
 38
 39	return cpus;
 40}
 41
 42static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
 43{
 44	size_t payload_size = nr_cpus * sizeof(int);
 45	struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
 46
 47	if (cpus != NULL) {
 48		cpus->nr = nr_cpus;
 49		memcpy(cpus->map, tmp_cpus, payload_size);
 50		refcount_set(&cpus->refcnt, 1);
 51	}
 52
 53	return cpus;
 54}
 55
 56struct cpu_map *cpu_map__read(FILE *file)
 57{
 58	struct cpu_map *cpus = NULL;
 59	int nr_cpus = 0;
 60	int *tmp_cpus = NULL, *tmp;
 61	int max_entries = 0;
 62	int n, cpu, prev;
 63	char sep;
 64
 65	sep = 0;
 66	prev = -1;
 67	for (;;) {
 68		n = fscanf(file, "%u%c", &cpu, &sep);
 69		if (n <= 0)
 70			break;
 71		if (prev >= 0) {
 72			int new_max = nr_cpus + cpu - prev - 1;
 73
 74			if (new_max >= max_entries) {
 75				max_entries = new_max + MAX_NR_CPUS / 2;
 76				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
 77				if (tmp == NULL)
 78					goto out_free_tmp;
 79				tmp_cpus = tmp;
 80			}
 81
 82			while (++prev < cpu)
 83				tmp_cpus[nr_cpus++] = prev;
 84		}
 85		if (nr_cpus == max_entries) {
 86			max_entries += MAX_NR_CPUS;
 87			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
 88			if (tmp == NULL)
 89				goto out_free_tmp;
 90			tmp_cpus = tmp;
 91		}
 92
 93		tmp_cpus[nr_cpus++] = cpu;
 94		if (n == 2 && sep == '-')
 95			prev = cpu;
 96		else
 97			prev = -1;
 98		if (n == 1 || sep == '\n')
 99			break;
100	}
101
102	if (nr_cpus > 0)
103		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
104	else
105		cpus = cpu_map__default_new();
106out_free_tmp:
107	free(tmp_cpus);
108	return cpus;
109}
110
111static struct cpu_map *cpu_map__read_all_cpu_map(void)
112{
113	struct cpu_map *cpus = NULL;
114	FILE *onlnf;
115
116	onlnf = fopen("/sys/devices/system/cpu/online", "r");
117	if (!onlnf)
118		return cpu_map__default_new();
119
120	cpus = cpu_map__read(onlnf);
121	fclose(onlnf);
122	return cpus;
123}
124
125struct cpu_map *cpu_map__new(const char *cpu_list)
126{
127	struct cpu_map *cpus = NULL;
128	unsigned long start_cpu, end_cpu = 0;
129	char *p = NULL;
130	int i, nr_cpus = 0;
131	int *tmp_cpus = NULL, *tmp;
132	int max_entries = 0;
133
134	if (!cpu_list)
135		return cpu_map__read_all_cpu_map();
136
137	if (!isdigit(*cpu_list))
138		goto out;
139
140	while (isdigit(*cpu_list)) {
141		p = NULL;
142		start_cpu = strtoul(cpu_list, &p, 0);
143		if (start_cpu >= INT_MAX
144		    || (*p != '\0' && *p != ',' && *p != '-'))
145			goto invalid;
146
147		if (*p == '-') {
148			cpu_list = ++p;
149			p = NULL;
150			end_cpu = strtoul(cpu_list, &p, 0);
151
152			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
153				goto invalid;
154
155			if (end_cpu < start_cpu)
156				goto invalid;
157		} else {
158			end_cpu = start_cpu;
159		}
160
161		for (; start_cpu <= end_cpu; start_cpu++) {
162			/* check for duplicates */
163			for (i = 0; i < nr_cpus; i++)
164				if (tmp_cpus[i] == (int)start_cpu)
165					goto invalid;
166
167			if (nr_cpus == max_entries) {
168				max_entries += MAX_NR_CPUS;
169				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
170				if (tmp == NULL)
171					goto invalid;
172				tmp_cpus = tmp;
173			}
174			tmp_cpus[nr_cpus++] = (int)start_cpu;
175		}
176		if (*p)
177			++p;
178
179		cpu_list = p;
180	}
181
182	if (nr_cpus > 0)
183		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
184	else
185		cpus = cpu_map__default_new();
186invalid:
187	free(tmp_cpus);
188out:
189	return cpus;
190}
191
192static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
193{
194	struct cpu_map *map;
195
196	map = cpu_map__empty_new(cpus->nr);
197	if (map) {
198		unsigned i;
199
200		for (i = 0; i < cpus->nr; i++) {
201			/*
202			 * Special treatment for -1, which is not real cpu number,
203			 * and we need to use (int) -1 to initialize map[i],
204			 * otherwise it would become 65535.
205			 */
206			if (cpus->cpu[i] == (u16) -1)
207				map->map[i] = -1;
208			else
209				map->map[i] = (int) cpus->cpu[i];
210		}
211	}
212
213	return map;
214}
215
216static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
217{
218	struct cpu_map *map;
219	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
220
221	nr = bitmap_weight(mask->mask, nbits);
222
223	map = cpu_map__empty_new(nr);
224	if (map) {
225		int cpu, i = 0;
226
227		for_each_set_bit(cpu, mask->mask, nbits)
228			map->map[i++] = cpu;
229	}
230	return map;
231
232}
233
234struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
235{
236	if (data->type == PERF_CPU_MAP__CPUS)
237		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
238	else
239		return cpu_map__from_mask((struct cpu_map_mask *)data->data);
240}
241
242size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
243{
244#define BUFSIZE 1024
245	char buf[BUFSIZE];
246
247	cpu_map__snprint(map, buf, sizeof(buf));
248	return fprintf(fp, "%s\n", buf);
249#undef BUFSIZE
250}
251
252struct cpu_map *cpu_map__dummy_new(void)
253{
254	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
255
256	if (cpus != NULL) {
257		cpus->nr = 1;
258		cpus->map[0] = -1;
259		refcount_set(&cpus->refcnt, 1);
260	}
261
262	return cpus;
263}
264
265struct cpu_map *cpu_map__empty_new(int nr)
266{
267	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
268
269	if (cpus != NULL) {
270		int i;
271
272		cpus->nr = nr;
273		for (i = 0; i < nr; i++)
274			cpus->map[i] = -1;
275
276		refcount_set(&cpus->refcnt, 1);
277	}
278
279	return cpus;
280}
281
282static void cpu_map__delete(struct cpu_map *map)
283{
284	if (map) {
285		WARN_ONCE(refcount_read(&map->refcnt) != 0,
286			  "cpu_map refcnt unbalanced\n");
287		free(map);
288	}
289}
290
291struct cpu_map *cpu_map__get(struct cpu_map *map)
292{
293	if (map)
294		refcount_inc(&map->refcnt);
295	return map;
296}
297
298void cpu_map__put(struct cpu_map *map)
299{
300	if (map && refcount_dec_and_test(&map->refcnt))
301		cpu_map__delete(map);
302}
303
304static int cpu__get_topology_int(int cpu, const char *name, int *value)
305{
306	char path[PATH_MAX];
307
308	snprintf(path, PATH_MAX,
309		"devices/system/cpu/cpu%d/topology/%s", cpu, name);
310
311	return sysfs__read_int(path, value);
312}
313
314int cpu_map__get_socket_id(int cpu)
315{
316	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
317	return ret ?: value;
318}
319
320int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
321{
322	int cpu;
323
324	if (idx > map->nr)
325		return -1;
326
327	cpu = map->map[idx];
328
329	return cpu_map__get_socket_id(cpu);
330}
331
332static int cmp_ids(const void *a, const void *b)
333{
334	return *(int *)a - *(int *)b;
335}
336
337int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
338		       int (*f)(struct cpu_map *map, int cpu, void *data),
339		       void *data)
340{
341	struct cpu_map *c;
342	int nr = cpus->nr;
343	int cpu, s1, s2;
344
345	/* allocate as much as possible */
346	c = calloc(1, sizeof(*c) + nr * sizeof(int));
347	if (!c)
348		return -1;
349
350	for (cpu = 0; cpu < nr; cpu++) {
351		s1 = f(cpus, cpu, data);
352		for (s2 = 0; s2 < c->nr; s2++) {
353			if (s1 == c->map[s2])
354				break;
355		}
356		if (s2 == c->nr) {
357			c->map[c->nr] = s1;
358			c->nr++;
359		}
360	}
361	/* ensure we process id in increasing order */
362	qsort(c->map, c->nr, sizeof(int), cmp_ids);
363
364	refcount_set(&c->refcnt, 1);
365	*res = c;
366	return 0;
367}
368
369int cpu_map__get_core_id(int cpu)
370{
371	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
372	return ret ?: value;
373}
374
375int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
376{
377	int cpu, s;
378
379	if (idx > map->nr)
380		return -1;
381
382	cpu = map->map[idx];
383
384	cpu = cpu_map__get_core_id(cpu);
385
386	s = cpu_map__get_socket(map, idx, data);
387	if (s == -1)
388		return -1;
389
390	/*
391	 * encode socket in upper 16 bits
392	 * core_id is relative to socket, and
393	 * we need a global id. So we combine
394	 * socket+ core id
395	 */
396	return (s << 16) | (cpu & 0xffff);
397}
398
399int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
400{
401	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
402}
403
404int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
405{
406	return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
407}
408
409/* setup simple routines to easily access node numbers given a cpu number */
410static int get_max_num(char *path, int *max)
411{
412	size_t num;
413	char *buf;
414	int err = 0;
415
416	if (filename__read_str(path, &buf, &num))
417		return -1;
418
419	buf[num] = '\0';
420
421	/* start on the right, to find highest node num */
422	while (--num) {
423		if ((buf[num] == ',') || (buf[num] == '-')) {
424			num++;
425			break;
426		}
427	}
428	if (sscanf(&buf[num], "%d", max) < 1) {
429		err = -1;
430		goto out;
431	}
432
433	/* convert from 0-based to 1-based */
434	(*max)++;
435
436out:
437	free(buf);
438	return err;
439}
440
441/* Determine highest possible cpu in the system for sparse allocation */
442static void set_max_cpu_num(void)
443{
444	const char *mnt;
445	char path[PATH_MAX];
446	int ret = -1;
447
448	/* set up default */
449	max_cpu_num = 4096;
450	max_present_cpu_num = 4096;
451
452	mnt = sysfs__mountpoint();
453	if (!mnt)
454		goto out;
455
456	/* get the highest possible cpu number for a sparse allocation */
457	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
458	if (ret == PATH_MAX) {
459		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
460		goto out;
461	}
462
463	ret = get_max_num(path, &max_cpu_num);
464	if (ret)
465		goto out;
466
467	/* get the highest present cpu number for a sparse allocation */
468	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
469	if (ret == PATH_MAX) {
470		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
471		goto out;
472	}
473
474	ret = get_max_num(path, &max_present_cpu_num);
475
476out:
477	if (ret)
478		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
479}
480
481/* Determine highest possible node in the system for sparse allocation */
482static void set_max_node_num(void)
483{
484	const char *mnt;
485	char path[PATH_MAX];
486	int ret = -1;
487
488	/* set up default */
489	max_node_num = 8;
490
491	mnt = sysfs__mountpoint();
492	if (!mnt)
493		goto out;
494
495	/* get the highest possible cpu number for a sparse allocation */
496	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
497	if (ret == PATH_MAX) {
498		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
499		goto out;
500	}
501
502	ret = get_max_num(path, &max_node_num);
503
504out:
505	if (ret)
506		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
507}
508
509int cpu__max_node(void)
510{
511	if (unlikely(!max_node_num))
512		set_max_node_num();
513
514	return max_node_num;
515}
516
517int cpu__max_cpu(void)
518{
519	if (unlikely(!max_cpu_num))
520		set_max_cpu_num();
521
522	return max_cpu_num;
523}
524
525int cpu__max_present_cpu(void)
526{
527	if (unlikely(!max_present_cpu_num))
528		set_max_cpu_num();
529
530	return max_present_cpu_num;
531}
532
533
534int cpu__get_node(int cpu)
535{
536	if (unlikely(cpunode_map == NULL)) {
537		pr_debug("cpu_map not initialized\n");
538		return -1;
539	}
540
541	return cpunode_map[cpu];
542}
543
544static int init_cpunode_map(void)
545{
546	int i;
547
548	set_max_cpu_num();
549	set_max_node_num();
550
551	cpunode_map = calloc(max_cpu_num, sizeof(int));
552	if (!cpunode_map) {
553		pr_err("%s: calloc failed\n", __func__);
554		return -1;
555	}
556
557	for (i = 0; i < max_cpu_num; i++)
558		cpunode_map[i] = -1;
559
560	return 0;
561}
562
563int cpu__setup_cpunode_map(void)
564{
565	struct dirent *dent1, *dent2;
566	DIR *dir1, *dir2;
567	unsigned int cpu, mem;
568	char buf[PATH_MAX];
569	char path[PATH_MAX];
570	const char *mnt;
571	int n;
572
573	/* initialize globals */
574	if (init_cpunode_map())
575		return -1;
576
577	mnt = sysfs__mountpoint();
578	if (!mnt)
579		return 0;
580
581	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
582	if (n == PATH_MAX) {
583		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
584		return -1;
585	}
586
587	dir1 = opendir(path);
588	if (!dir1)
589		return 0;
590
591	/* walk tree and setup map */
592	while ((dent1 = readdir(dir1)) != NULL) {
593		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
594			continue;
595
596		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
597		if (n == PATH_MAX) {
598			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
599			continue;
600		}
601
602		dir2 = opendir(buf);
603		if (!dir2)
604			continue;
605		while ((dent2 = readdir(dir2)) != NULL) {
606			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
607				continue;
608			cpunode_map[cpu] = mem;
609		}
610		closedir(dir2);
611	}
612	closedir(dir1);
613	return 0;
614}
615
616bool cpu_map__has(struct cpu_map *cpus, int cpu)
617{
618	return cpu_map__idx(cpus, cpu) != -1;
619}
620
621int cpu_map__idx(struct cpu_map *cpus, int cpu)
622{
623	int i;
624
625	for (i = 0; i < cpus->nr; ++i) {
626		if (cpus->map[i] == cpu)
627			return i;
628	}
629
630	return -1;
631}
632
633int cpu_map__cpu(struct cpu_map *cpus, int idx)
634{
635	return cpus->map[idx];
636}
637
638size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
639{
640	int i, cpu, start = -1;
641	bool first = true;
642	size_t ret = 0;
643
644#define COMMA first ? "" : ","
645
646	for (i = 0; i < map->nr + 1; i++) {
647		bool last = i == map->nr;
648
649		cpu = last ? INT_MAX : map->map[i];
650
651		if (start == -1) {
652			start = i;
653			if (last) {
654				ret += snprintf(buf + ret, size - ret,
655						"%s%d", COMMA,
656						map->map[i]);
657			}
658		} else if (((i - start) != (cpu - map->map[start])) || last) {
659			int end = i - 1;
660
661			if (start == end) {
662				ret += snprintf(buf + ret, size - ret,
663						"%s%d", COMMA,
664						map->map[start]);
665			} else {
666				ret += snprintf(buf + ret, size - ret,
667						"%s%d-%d", COMMA,
668						map->map[start], map->map[end]);
669			}
670			first = false;
671			start = i;
672		}
673	}
674
675#undef COMMA
676
677	pr_debug("cpumask list: %s\n", buf);
678	return ret;
679}
680
681static char hex_char(unsigned char val)
682{
683	if (val < 10)
684		return val + '0';
685	if (val < 16)
686		return val - 10 + 'a';
687	return '?';
688}
689
690size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
691{
692	int i, cpu;
693	char *ptr = buf;
694	unsigned char *bitmap;
695	int last_cpu = cpu_map__cpu(map, map->nr - 1);
696
697	bitmap = zalloc((last_cpu + 7) / 8);
698	if (bitmap == NULL) {
699		buf[0] = '\0';
700		return 0;
701	}
702
703	for (i = 0; i < map->nr; i++) {
704		cpu = cpu_map__cpu(map, i);
705		bitmap[cpu / 8] |= 1 << (cpu % 8);
706	}
707
708	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
709		unsigned char bits = bitmap[cpu / 8];
710
711		if (cpu % 8)
712			bits >>= 4;
713		else
714			bits &= 0xf;
715
716		*ptr++ = hex_char(bits);
717		if ((cpu % 32) == 0 && cpu > 0)
718			*ptr++ = ',';
719	}
720	*ptr = '\0';
721	free(bitmap);
722
723	buf[size - 1] = '\0';
724	return ptr - buf;
725}
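
For reference, the "0-3,6"-style syntax that cpu_map__new() above accepts can be exercised outside of perf. The following is a minimal standalone sketch (not part of the file; the program and its names are purely illustrative) that mirrors only the range/comma parsing loop, without the duplicate check or the cpu_map allocation:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *cpu_list = "0-2,4,7";	/* example input, e.g. what perf's -C option takes */
	char *p;
	unsigned long start, end;

	while (isdigit((unsigned char)*cpu_list)) {
		start = strtoul(cpu_list, &p, 10);
		if (*p == '-')			/* "start-end" range */
			end = strtoul(p + 1, &p, 10);
		else				/* single cpu */
			end = start;
		for (; start <= end; start++)
			printf("cpu %lu\n", start);
		if (*p == ',')			/* skip the separator and continue */
			p++;
		cpu_list = p;
	}
	return 0;
}

Built with any C compiler, it prints cpus 0, 1, 2, 4 and 7 for the input above, i.e. the same set that cpu_map__trim_new() would be handed by the real parser.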
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2#include <api/fs/fs.h>
  3#include "cpumap.h"
  4#include "debug.h"
  5#include "event.h"
  6#include <assert.h>
  7#include <dirent.h>
  8#include <stdio.h>
  9#include <stdlib.h>
 10#include <linux/bitmap.h>
 11#include "asm/bug.h"
 12
 13#include <linux/ctype.h>
 14#include <linux/zalloc.h>
 15#include <internal/cpumap.h>
 16
 17static struct perf_cpu max_cpu_num;
 18static struct perf_cpu max_present_cpu_num;
 19static int max_node_num;
 20/**
 21 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 22 * CPU number.
 23 */
 24static int *cpunode_map;
 25
 26bool perf_record_cpu_map_data__test_bit(int i,
 27					const struct perf_record_cpu_map_data *data)
 28{
 29	int bit_word32 = i / 32;
 30	__u32 bit_mask32 = 1U << (i & 31);
 31	int bit_word64 = i / 64;
 32	__u64 bit_mask64 = ((__u64)1) << (i & 63);
 33
 34	return (data->mask32_data.long_size == 4)
 35		? (bit_word32 < data->mask32_data.nr) &&
 36		(data->mask32_data.mask[bit_word32] & bit_mask32) != 0
 37		: (bit_word64 < data->mask64_data.nr) &&
 38		(data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
 39}
 40
 41/* Read ith mask value from data into the given 64-bit sized bitmap */
 42static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
 43						    int i, unsigned long *bitmap)
 44{
 45#if __SIZEOF_LONG__ == 8
 46	if (data->mask32_data.long_size == 4)
 47		bitmap[0] = data->mask32_data.mask[i];
 48	else
 49		bitmap[0] = data->mask64_data.mask[i];
 50#else
 51	if (data->mask32_data.long_size == 4) {
 52		bitmap[0] = data->mask32_data.mask[i];
 53		bitmap[1] = 0;
 54	} else {
 55#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 56		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
 57		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
 58#else
 59		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
 60		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
 61#endif
 62	}
 63#endif
 64}
 65static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
 66{
 67	struct perf_cpu_map *map;
 68
 69	map = perf_cpu_map__empty_new(data->cpus_data.nr);
 70	if (map) {
 71		unsigned i;
 72
 73		for (i = 0; i < data->cpus_data.nr; i++) {
 74			/*
 75			 * Special treatment for -1, which is not real cpu number,
 76			 * and we need to use (int) -1 to initialize map[i],
 77			 * otherwise it would become 65535.
 78			 */
 79			if (data->cpus_data.cpu[i] == (u16) -1)
 80				map->map[i].cpu = -1;
 81			else
 82				map->map[i].cpu = (int) data->cpus_data.cpu[i];
 83		}
 84	}
 85
 86	return map;
 87}
 88
 89static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
 90{
 91	DECLARE_BITMAP(local_copy, 64);
 92	int weight = 0, mask_nr = data->mask32_data.nr;
 93	struct perf_cpu_map *map;
 94
 95	for (int i = 0; i < mask_nr; i++) {
 96		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
 97		weight += bitmap_weight(local_copy, 64);
 98	}
 99
100	map = perf_cpu_map__empty_new(weight);
101	if (!map)
102		return NULL;
103
104	for (int i = 0, j = 0; i < mask_nr; i++) {
105		int cpus_per_i = (i * data->mask32_data.long_size  * BITS_PER_BYTE);
106		int cpu;
107
108		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
109		for_each_set_bit(cpu, local_copy, 64)
110			map->map[j++].cpu = cpu + cpus_per_i;
111	}
112	return map;
113
114}
115
116static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
117{
118	struct perf_cpu_map *map;
119	unsigned int i = 0;
120
121	map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
122				data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
123	if (!map)
124		return NULL;
125
126	if (data->range_cpu_data.any_cpu)
127		map->map[i++].cpu = -1;
128
129	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
130	     i++, cpu++)
131		map->map[i].cpu = cpu;
132
133	return map;
134}
135
136struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
137{
138	switch (data->type) {
139	case PERF_CPU_MAP__CPUS:
140		return cpu_map__from_entries(data);
141	case PERF_CPU_MAP__MASK:
142		return cpu_map__from_mask(data);
143	case PERF_CPU_MAP__RANGE_CPUS:
144		return cpu_map__from_range(data);
145	default:
146		pr_err("cpu_map__new_data unknown type %d\n", data->type);
147		return NULL;
148	}
149}
150
151size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
152{
153#define BUFSIZE 1024
154	char buf[BUFSIZE];
155
156	cpu_map__snprint(map, buf, sizeof(buf));
157	return fprintf(fp, "%s\n", buf);
158#undef BUFSIZE
159}
160
161struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
162{
163	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
164
165	if (cpus != NULL) {
166		int i;
167
168		cpus->nr = nr;
169		for (i = 0; i < nr; i++)
170			cpus->map[i].cpu = -1;
171
172		refcount_set(&cpus->refcnt, 1);
173	}
174
175	return cpus;
176}
177
178struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
179{
180	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);
181
182	if (cpus != NULL) {
183		int i;
184
185		cpus->nr = nr;
186		for (i = 0; i < nr; i++)
187			cpus->map[i] = aggr_cpu_id__empty();
188
189		refcount_set(&cpus->refcnt, 1);
190	}
191
192	return cpus;
193}
194
195static int cpu__get_topology_int(int cpu, const char *name, int *value)
196{
197	char path[PATH_MAX];
198
199	snprintf(path, PATH_MAX,
200		"devices/system/cpu/cpu%d/topology/%s", cpu, name);
201
202	return sysfs__read_int(path, value);
203}
204
205int cpu__get_socket_id(struct perf_cpu cpu)
206{
207	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);
208	return ret ?: value;
209}
210
211struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
212{
213	struct aggr_cpu_id id = aggr_cpu_id__empty();
214
215	id.socket = cpu__get_socket_id(cpu);
216	return id;
217}
218
219static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
220{
221	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
222	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;
223
224	if (a->node != b->node)
225		return a->node - b->node;
226	else if (a->socket != b->socket)
227		return a->socket - b->socket;
228	else if (a->die != b->die)
229		return a->die - b->die;
230	else if (a->core != b->core)
231		return a->core - b->core;
232	else
233		return a->thread_idx - b->thread_idx;
234}
235
236struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
237				       aggr_cpu_id_get_t get_id,
238				       void *data, bool needs_sort)
239{
240	int idx;
241	struct perf_cpu cpu;
242	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr);
243
244	if (!c)
245		return NULL;
246
247	/* Reset size as it may only be partially filled */
248	c->nr = 0;
249
250	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
251		bool duplicate = false;
252		struct aggr_cpu_id cpu_id = get_id(cpu, data);
253
254		for (int j = 0; j < c->nr; j++) {
255			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
256				duplicate = true;
257				break;
258			}
259		}
260		if (!duplicate) {
261			c->map[c->nr] = cpu_id;
262			c->nr++;
263		}
264	}
265	/* Trim. */
266	if (c->nr != cpus->nr) {
267		struct cpu_aggr_map *trimmed_c =
268			realloc(c,
269				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);
270
271		if (trimmed_c)
272			c = trimmed_c;
273	}
274
275	/* ensure we process id in increasing order */
276	if (needs_sort)
277		qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);
278
279	return c;
280
281}
282
283int cpu__get_die_id(struct perf_cpu cpu)
284{
285	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);
286
287	return ret ?: value;
288}
289
290struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
291{
292	struct aggr_cpu_id id;
293	int die;
294
295	die = cpu__get_die_id(cpu);
296	/* There is no die_id on legacy system. */
297	if (die == -1)
298		die = 0;
299
300	/*
301	 * die_id is relative to socket, so start
302	 * with the socket ID and then add die to
303	 * make a unique ID.
304	 */
305	id = aggr_cpu_id__socket(cpu, data);
306	if (aggr_cpu_id__is_empty(&id))
307		return id;
308
309	id.die = die;
310	return id;
311}
312
313int cpu__get_core_id(struct perf_cpu cpu)
314{
315	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);
316	return ret ?: value;
317}
318
319struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
320{
321	struct aggr_cpu_id id;
322	int core = cpu__get_core_id(cpu);
323
324	/* aggr_cpu_id__die returns a struct with socket and die set. */
325	id = aggr_cpu_id__die(cpu, data);
326	if (aggr_cpu_id__is_empty(&id))
327		return id;
328
329	/*
330	 * core_id is relative to socket and die, we need a global id.
331	 * So we combine the result from cpu_map__get_die with the core id
332	 */
333	id.core = core;
334	return id;
335
336}
337
338struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
339{
340	struct aggr_cpu_id id;
341
342	/* aggr_cpu_id__core returns a struct with socket, die and core set. */
343	id = aggr_cpu_id__core(cpu, data);
344	if (aggr_cpu_id__is_empty(&id))
345		return id;
346
347	id.cpu = cpu;
348	return id;
349
350}
351
352struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
353{
354	struct aggr_cpu_id id = aggr_cpu_id__empty();
355
356	id.node = cpu__get_node(cpu);
357	return id;
358}
359
360struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
361{
362	struct aggr_cpu_id id = aggr_cpu_id__empty();
363
364	/* it always aggregates to the cpu 0 */
365	cpu.cpu = 0;
366	id.cpu = cpu;
367	return id;
368}
369
370/* setup simple routines to easily access node numbers given a cpu number */
371static int get_max_num(char *path, int *max)
372{
373	size_t num;
374	char *buf;
375	int err = 0;
376
377	if (filename__read_str(path, &buf, &num))
378		return -1;
379
380	buf[num] = '\0';
381
382	/* start on the right, to find highest node num */
383	while (--num) {
384		if ((buf[num] == ',') || (buf[num] == '-')) {
385			num++;
386			break;
387		}
388	}
389	if (sscanf(&buf[num], "%d", max) < 1) {
390		err = -1;
391		goto out;
392	}
393
394	/* convert from 0-based to 1-based */
395	(*max)++;
396
397out:
398	free(buf);
399	return err;
400}
401
402/* Determine highest possible cpu in the system for sparse allocation */
403static void set_max_cpu_num(void)
404{
405	const char *mnt;
406	char path[PATH_MAX];
407	int ret = -1;
408
409	/* set up default */
410	max_cpu_num.cpu = 4096;
411	max_present_cpu_num.cpu = 4096;
412
413	mnt = sysfs__mountpoint();
414	if (!mnt)
415		goto out;
416
417	/* get the highest possible cpu number for a sparse allocation */
418	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
419	if (ret >= PATH_MAX) {
420		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
421		goto out;
422	}
423
424	ret = get_max_num(path, &max_cpu_num.cpu);
425	if (ret)
426		goto out;
427
428	/* get the highest present cpu number for a sparse allocation */
429	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
430	if (ret >= PATH_MAX) {
431		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
432		goto out;
433	}
434
435	ret = get_max_num(path, &max_present_cpu_num.cpu);
436
437out:
438	if (ret)
439		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
440}
441
442/* Determine highest possible node in the system for sparse allocation */
443static void set_max_node_num(void)
444{
445	const char *mnt;
446	char path[PATH_MAX];
447	int ret = -1;
448
449	/* set up default */
450	max_node_num = 8;
451
452	mnt = sysfs__mountpoint();
453	if (!mnt)
454		goto out;
455
456	/* get the highest possible cpu number for a sparse allocation */
457	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
458	if (ret >= PATH_MAX) {
459		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
460		goto out;
461	}
462
463	ret = get_max_num(path, &max_node_num);
464
465out:
466	if (ret)
467		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
468}
469
470int cpu__max_node(void)
471{
472	if (unlikely(!max_node_num))
473		set_max_node_num();
474
475	return max_node_num;
476}
477
478struct perf_cpu cpu__max_cpu(void)
479{
480	if (unlikely(!max_cpu_num.cpu))
481		set_max_cpu_num();
482
483	return max_cpu_num;
484}
485
486struct perf_cpu cpu__max_present_cpu(void)
487{
488	if (unlikely(!max_present_cpu_num.cpu))
489		set_max_cpu_num();
490
491	return max_present_cpu_num;
492}
493
494
495int cpu__get_node(struct perf_cpu cpu)
496{
497	if (unlikely(cpunode_map == NULL)) {
498		pr_debug("cpu_map not initialized\n");
499		return -1;
500	}
501
502	return cpunode_map[cpu.cpu];
503}
504
505static int init_cpunode_map(void)
506{
507	int i;
508
509	set_max_cpu_num();
510	set_max_node_num();
511
512	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
513	if (!cpunode_map) {
514		pr_err("%s: calloc failed\n", __func__);
515		return -1;
516	}
517
518	for (i = 0; i < max_cpu_num.cpu; i++)
519		cpunode_map[i] = -1;
520
521	return 0;
522}
523
524int cpu__setup_cpunode_map(void)
525{
526	struct dirent *dent1, *dent2;
527	DIR *dir1, *dir2;
528	unsigned int cpu, mem;
529	char buf[PATH_MAX];
530	char path[PATH_MAX];
531	const char *mnt;
532	int n;
533
534	/* initialize globals */
535	if (init_cpunode_map())
536		return -1;
537
538	mnt = sysfs__mountpoint();
539	if (!mnt)
540		return 0;
541
542	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
543	if (n >= PATH_MAX) {
544		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
545		return -1;
546	}
547
548	dir1 = opendir(path);
549	if (!dir1)
550		return 0;
551
552	/* walk tree and setup map */
553	while ((dent1 = readdir(dir1)) != NULL) {
554		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
555			continue;
556
557		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
558		if (n >= PATH_MAX) {
559			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
560			continue;
561		}
562
563		dir2 = opendir(buf);
564		if (!dir2)
565			continue;
566		while ((dent2 = readdir(dir2)) != NULL) {
567			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
568				continue;
569			cpunode_map[cpu] = mem;
570		}
571		closedir(dir2);
572	}
573	closedir(dir1);
574	return 0;
575}
576
577size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
578{
579	int i, start = -1;
580	bool first = true;
581	size_t ret = 0;
582
583#define COMMA first ? "" : ","
584
585	for (i = 0; i < map->nr + 1; i++) {
586		struct perf_cpu cpu = { .cpu = INT_MAX };
587		bool last = i == map->nr;
588
589		if (!last)
590			cpu = map->map[i];
591
592		if (start == -1) {
593			start = i;
594			if (last) {
595				ret += snprintf(buf + ret, size - ret,
596						"%s%d", COMMA,
597						map->map[i].cpu);
598			}
599		} else if (((i - start) != (cpu.cpu - map->map[start].cpu)) || last) {
600			int end = i - 1;
601
602			if (start == end) {
603				ret += snprintf(buf + ret, size - ret,
604						"%s%d", COMMA,
605						map->map[start].cpu);
606			} else {
607				ret += snprintf(buf + ret, size - ret,
608						"%s%d-%d", COMMA,
609						map->map[start].cpu, map->map[end].cpu);
610			}
611			first = false;
612			start = i;
613		}
614	}
615
616#undef COMMA
617
618	pr_debug2("cpumask list: %s\n", buf);
619	return ret;
620}
621
622static char hex_char(unsigned char val)
623{
624	if (val < 10)
625		return val + '0';
626	if (val < 16)
627		return val - 10 + 'a';
628	return '?';
629}
630
631size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
632{
633	int i, cpu;
634	char *ptr = buf;
635	unsigned char *bitmap;
636	struct perf_cpu last_cpu = perf_cpu_map__cpu(map, map->nr - 1);
637
638	if (buf == NULL)
639		return 0;
640
641	bitmap = zalloc(last_cpu.cpu / 8 + 1);
642	if (bitmap == NULL) {
643		buf[0] = '\0';
644		return 0;
645	}
646
647	for (i = 0; i < map->nr; i++) {
648		cpu = perf_cpu_map__cpu(map, i).cpu;
649		bitmap[cpu / 8] |= 1 << (cpu % 8);
650	}
651
652	for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
653		unsigned char bits = bitmap[cpu / 8];
654
655		if (cpu % 8)
656			bits >>= 4;
657		else
658			bits &= 0xf;
659
660		*ptr++ = hex_char(bits);
661		if ((cpu % 32) == 0 && cpu > 0)
662			*ptr++ = ',';
663	}
664	*ptr = '\0';
665	free(bitmap);
666
667	buf[size - 1] = '\0';
668	return ptr - buf;
669}
670
671const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
672{
673	static const struct perf_cpu_map *online = NULL;
674
675	if (!online)
676		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
677
678	return online;
679}
680
681bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
682{
683	return a->thread_idx == b->thread_idx &&
684		a->node == b->node &&
685		a->socket == b->socket &&
686		a->die == b->die &&
687		a->core == b->core &&
688		a->cpu.cpu == b->cpu.cpu;
689}
690
691bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
692{
693	return a->thread_idx == -1 &&
694		a->node == -1 &&
695		a->socket == -1 &&
696		a->die == -1 &&
697		a->core == -1 &&
698		a->cpu.cpu == -1;
699}
700
701struct aggr_cpu_id aggr_cpu_id__empty(void)
702{
703	struct aggr_cpu_id ret = {
704		.thread_idx = -1,
705		.node = -1,
706		.socket = -1,
707		.die = -1,
708		.core = -1,
709		.cpu = (struct perf_cpu){ .cpu = -1 },
710	};
711	return ret;
712}
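
For the v6.2 API, a hedged usage sketch of how an aggregation map is typically built, one entry per distinct socket, similar to what perf stat does for per-socket aggregation. It assumes the perf-internal headers that declare the functions shown above; perf_cpu_map__new()/perf_cpu_map__put() come from libperf, and the helper name sockets_of_online_cpus() is illustrative only:

#include "cpumap.h"

static struct cpu_aggr_map *sockets_of_online_cpus(void)
{
	/* NULL means "read /sys/devices/system/cpu/online", as in cpu_map__online() above */
	struct perf_cpu_map *online = perf_cpu_map__new(NULL);
	struct cpu_aggr_map *sockets;

	if (!online)
		return NULL;

	/*
	 * aggr_cpu_id__socket() fills only the socket field of each id, so
	 * cpu_aggr_map__new() collapses all CPUs that share a socket into a
	 * single entry and, with needs_sort set, sorts the ids increasingly.
	 */
	sockets = cpu_aggr_map__new(online, aggr_cpu_id__socket,
				    /*data=*/NULL, /*needs_sort=*/true);

	perf_cpu_map__put(online);
	return sockets;
}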