v6.8
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}
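
/*
 * Example (a minimal sketch, assuming a program linked against libperf):
 * the "any CPU" map holds the single dummy value -1, the value
 * perf_event_open() accepts to mean "any CPU".
 *
 *	struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
 *
 *	if (any)
 *		assert(perf_cpu_map__has_any_cpu(any));
 *	perf_cpu_map__put(any);
 */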

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}
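
/*
 * Refcounting sketch: perf_cpu_map__get() takes an extra reference that
 * must be balanced by a perf_cpu_map__put(); the final put frees the
 * map. Assuming 'cpus' is a valid map handed to another owner:
 *
 *	struct perf_cpu_map *ref = perf_cpu_map__get(cpus);
 *
 *	consume(ref);			// hypothetical second user
 *	perf_cpu_map__put(ref);		// drop the extra reference
 *	perf_cpu_map__put(cpus);	// drop the original reference
 */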

static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}
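
/*
 * Example (a minimal sketch): enumerating the online CPUs with the
 * accessors defined further down in this file; error handling elided:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
 *	int idx;
 *
 *	for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
 *		printf("cpu%d\n", perf_cpu_map__cpu(cpus, idx).cpu);
 *	perf_cpu_map__put(cpus);
 */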


static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}
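
/*
 * Worked example for cpu_map__trim_new(): with nr_cpus = 4 and
 * tmp_cpus = { 3, 1, 3, 0 }, qsort() yields { 0, 1, 3, 3 } and the
 * dedup pass compacts it in place to { 0, 1, 3 }, after which
 * perf_cpu_map__set_nr() trims nr down to 3.
 */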

struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
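
/*
 * Example (a sketch, assuming glibc's fmemopen() to get an in-memory
 * FILE): kernel-style range lists such as "0-2,7\n" expand to every
 * CPU they cover:
 *
 *	char list[] = "0-2,7\n";
 *	FILE *f = fmemopen(list, sizeof(list) - 1, "r");
 *	struct perf_cpu_map *cpus = f ? perf_cpu_map__read(f) : NULL;
 *
 *	// cpus is { 0, 1, 2, 7 }, so perf_cpu_map__nr(cpus) == 4
 *	if (f)
 *		fclose(f);
 *	perf_cpu_map__put(cpus);
 */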

struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * We must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs (e.g., because of
	 * CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
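
/*
 * Example (a minimal sketch): "N", "N-M" and comma-separated forms can
 * be mixed, so both of these parse to the map { 1, 2, 3, 5 }:
 *
 *	struct perf_cpu_map *a = perf_cpu_map__new("1-3,5");
 *	struct perf_cpu_map *b = perf_cpu_map__new("1,2,3,5");
 *
 *	// perf_cpu_map__equal(a, b) holds, assuming both allocations
 *	// succeeded
 *	perf_cpu_map__put(a);
 *	perf_cpu_map__put(b);
 */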

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}
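
/*
 * Lookup sketch: the map is kept sorted (see cpu_map__trim_new()), so
 * perf_cpu_map__idx() can binary search it. Assuming cpus = { 0, 1, 3 }:
 *
 *	struct perf_cpu two = { .cpu = 2 }, three = { .cpu = 3 };
 *
 *	perf_cpu_map__idx(cpus, three);	// returns 2
 *	perf_cpu_map__has(cpus, three);	// returns true
 *	perf_cpu_map__has(cpus, two);	// returns false
 */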

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s the map, and cpu_map__new_sysconf() builds it sorted as well.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}
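
/*
 * Subset sketch: with a = { 0, 1, 2, 3 } and b = { 1, 3 }, the single
 * pass over both sorted maps makes perf_cpu_map__is_subset(a, b)
 * return true, while perf_cpu_map__is_subset(b, a) returns false since
 * b has fewer entries than a.
 */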

/*
 * Merge two cpumaps.
 *
 * 'orig' either gets freed and replaced with a new map, or reused with
 * no reference count change (similar to "realloc"); 'other' has its
 * reference count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from Wikipedia */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}
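
/*
 * Merge sketch: with orig = { 0, 2 } and other = { 1, 2 }, neither map
 * contains the other, so a new map { 0, 1, 2 } is built, orig's
 * reference is dropped, and other is left for the caller to put:
 *
 *	struct perf_cpu_map *merged = perf_cpu_map__merge(orig, other);
 *
 *	// orig must not be used past this point
 *	perf_cpu_map__put(merged);
 *	perf_cpu_map__put(other);
 */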

struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}
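
/*
 * Intersect sketch: with orig = { 0, 1, 2 } and other = { 1, 2, 3 },
 * neither map contains the other, so a new map { 1, 2 } is returned
 * and both inputs keep their references:
 *
 *	struct perf_cpu_map *common = perf_cpu_map__intersect(orig, other);
 *
 *	perf_cpu_map__put(common);
 */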
v5.9
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>

struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}

static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
	int i, j;

	if (cpus != NULL) {
		memcpy(cpus->map, tmp_cpus, payload_size);
		qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 || cpus->map[i] != cpus->map[i - 1])
				cpus->map[j++] = cpus->map[i];
		}
		cpus->nr = j;
		assert(j <= nr_cpus);
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * We must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs (e.g., because of
	 * CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	if (cpus && idx < cpus->nr)
		return cpus->map[idx];

	return -1;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? cpus->nr : 1;
}

bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? map->map[0] == -1 : true;
}

int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
	int i;

	for (i = 0; i < cpus->nr; ++i) {
		if (cpus->map[i] == cpu)
			return i;
	}

	return -1;
}

int perf_cpu_map__max(struct perf_cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}
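
/*
 * In this version the API still deals in plain ints rather than
 * struct perf_cpu; a minimal sketch:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
 *
 *	// perf_cpu_map__max(cpus) == 3, perf_cpu_map__cpu(cpus, 1) == 1
 *	perf_cpu_map__put(cpus);
 */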

/*
 * Merge two cpumaps.
 *
 * 'orig' either gets freed and replaced with a new map, or reused with
 * no reference count change (similar to "realloc"); 'other' has its
 * reference count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	int *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (!orig && !other)
		return NULL;
	if (!orig) {
		perf_cpu_map__get(other);
		return other;
	}
	if (!other)
		return orig;
	if (orig->nr == other->nr &&
	    !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
		return orig;

	tmp_len = orig->nr + other->nr;
	tmp_cpus = malloc(tmp_len * sizeof(int));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from Wikipedia */
	i = j = k = 0;
	while (i < orig->nr && j < other->nr) {
		if (orig->map[i] <= other->map[j]) {
			if (orig->map[i] == other->map[j])
				j++;
			tmp_cpus[k++] = orig->map[i++];
		} else
			tmp_cpus[k++] = other->map[j++];
	}

	while (i < orig->nr)
		tmp_cpus[k++] = orig->map[i++];

	while (j < other->nr)
		tmp_cpus[k++] = other->map[j++];
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}