v6.8 (tools/lib/perf/cpumap.c)
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}
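
/*
 * [Editor's illustration, not part of the original file] A minimal sketch
 * of the reference-counting contract implemented above, using a
 * hypothetical helper name: perf_cpu_map__get() takes an extra reference,
 * perf_cpu_map__put() drops one, and cpu_map__delete() frees the map when
 * the count reaches zero.
 */
#if 0
static void refcount_example(void)
{
	struct perf_cpu_map *map = perf_cpu_map__new_any_cpu(); /* refcnt == 1 */
	struct perf_cpu_map *ref = perf_cpu_map__get(map);      /* refcnt == 2 */

	perf_cpu_map__put(ref);	/* refcnt == 1, map still valid */
	perf_cpu_map__put(map);	/* refcnt == 0, map is freed */
}
#endif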

static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}
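
/*
 * [Editor's note] perf_cpu_map__new_online_cpus() prefers the sysfs file,
 * which reports online CPUs as a range list (for example "0-7" or "0-3,6"),
 * and only falls back to sysconf(_SC_NPROCESSORS_ONLN) when the file cannot
 * be opened. The sysconf fallback assumes CPU ids 0..n-1, so a sparse
 * online set (say, CPU 1 offlined) is only represented faithfully via the
 * sysfs path.
 */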

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}
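
/*
 * [Editor's illustration] cpu_map__trim_new() sorts and de-duplicates its
 * input: e.g. the array {3, 1, 3, 0} becomes the map {0, 1, 3} with nr set
 * to 3 (the allocation keeps room for the original four entries).
 */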

struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
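
/*
 * [Editor's illustration, not part of the original file] Feeding a
 * sysfs-style range list through perf_cpu_map__read(), assuming a POSIX
 * fmemopen() is available; the helper name is hypothetical.
 */
#if 0
static void read_example(void)
{
	FILE *f = fmemopen((void *)"0-2,5\n", 6, "r");
	struct perf_cpu_map *map = f ? perf_cpu_map__read(f) : NULL;

	/* map now holds CPUs {0, 1, 2, 5}; "0-2" was expanded via 'prev' */
	if (f)
		fclose(f);
	perf_cpu_map__put(map);	/* put() tolerates NULL */
}
#endif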

struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs
	 * (e.g. because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
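
/*
 * [Editor's illustration, not part of the original file] The three
 * outcomes of perf_cpu_map__new(); the helper name is hypothetical.
 */
#if 0
static void new_example(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("1,3-5"); /* {1, 3, 4, 5} */
	struct perf_cpu_map *b = perf_cpu_map__new(NULL);    /* all online CPUs */
	struct perf_cpu_map *c = perf_cpu_map__new("");      /* the "any CPU" map {-1} */

	perf_cpu_map__put(a);
	perf_cpu_map__put(b);
	perf_cpu_map__put(c);
}
#endif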

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}
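
/*
 * [Editor's note] The binary search above relies on the map being sorted
 * by CPU id, an invariant every constructor maintains (cpu_map__trim_new()
 * qsort()s its input, cpu_map__new_sysconf() builds the map in ascending
 * order). E.g. on the map {0, 1, 2, 5}, perf_cpu_map__idx() returns 3 for
 * CPU 5 and -1 for CPU 4.
 */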

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s it, cpu_map__new_sysconf() builds it sorted as well.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/* Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * 'orig' either gets freed and replaced with a new map, or reused with no
 * reference count change (similar to "realloc"); 'other' has its reference
 * count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}
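
/*
 * [Editor's illustration, not part of the original file] Merge follows
 * "realloc"-style ownership: the caller's reference to 'orig' is consumed,
 * while 'other' is only read (or gains an extra reference in the subset
 * fast path). The helper name is hypothetical.
 */
#if 0
static void merge_example(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("0-1"); /* {0, 1} */
	struct perf_cpu_map *b = perf_cpu_map__new("1-2"); /* {1, 2} */

	a = perf_cpu_map__merge(a, b); /* old 'a' reference consumed; a == {0, 1, 2} */
	perf_cpu_map__put(b);          /* 'b' is still ours to drop */
	perf_cpu_map__put(a);
}
#endif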

struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}
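
/*
 * [Editor's illustration, not part of the original file] Unlike merge,
 * intersect consumes neither input: both arguments keep their references
 * and the result is a new (or extra) reference the caller must put. The
 * helper name is hypothetical.
 */
#if 0
static void intersect_example(void)
{
	struct perf_cpu_map *a = perf_cpu_map__new("0-3");          /* {0, 1, 2, 3} */
	struct perf_cpu_map *b = perf_cpu_map__new("2-5");          /* {2, 3, 4, 5} */
	struct perf_cpu_map *both = perf_cpu_map__intersect(a, b);  /* {2, 3} */

	perf_cpu_map__put(both);
	perf_cpu_map__put(b);
	perf_cpu_map__put(a);
}
#endif
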
v6.13.7 (tools/lib/perf/cpumap.c)
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus;
	struct perf_cpu_map *result;

	if (nr_cpus == 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}
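
/*
 * [Editor's note] Relative to the v6.8 version above, this
 * perf_cpu_map__alloc() rejects nr_cpus == 0 up front, so an empty map is
 * always represented as NULL rather than as a zero-length allocation.
 */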

struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs
	 * (e.g. because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	if (!map)
		return true;

	return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
	return map == NULL;
}
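
/*
 * [Editor's note] The three emptiness predicates above differ subtly:
 *
 *   map == NULL   : has_any_cpu_or_is_empty  true,
 *                   is_any_cpu_or_is_empty   true,
 *                   is_empty                 true
 *   map == {-1}   : has_any_cpu_or_is_empty  true,
 *                   is_any_cpu_or_is_empty   true,
 *                   is_empty                 false
 *   map == {0, 1} : all three                false
 */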

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
{
	struct perf_cpu cpu, result = {
		.cpu = -1
	};
	int idx;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
		result = cpu;
		break;
	}
	return result;
}
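
/*
 * [Editor's note] perf_cpu_map__min() skips the "any CPU" (-1) entry via
 * perf_cpu_map__for_each_cpu_skip_any(), while perf_cpu_map__max() below
 * simply takes the last (highest) entry of the sorted map. E.g. on {2, 5},
 * min is CPU 2 and max is CPU 5; on the "any CPU" map {-1}, min yields the
 * default result of -1.
 */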

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s it, cpu_map__new_sysconf() builds it sorted as well.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/* Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * 'orig' either gets freed and replaced with a new map, or reused with no
 * reference count change (similar to "realloc"); 'other' has its reference
 * count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}

struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}