v5.9
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}


int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
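
The BPF helpers above implement a small id-keyed store: program info and BTF objects live in rbtrees guarded by bpf_progs.lock, inserted once and later looked up by id. Below is a minimal usage sketch of the program-info pair, assuming perf's internal headers are on the include path and that a populated struct bpf_prog_info_linear has already been obtained elsewhere (e.g. through libbpf). record_bpf_prog() and lookup_prog_name() are hypothetical helpers for illustration only; they are not part of env.c.

/*
 * Usage sketch (not part of env.c): stash a BPF program's info in a
 * perf_env and look it up again by program id.
 */
#include <errno.h>
#include <stdlib.h>
#include "env.h"
#include "bpf-event.h"

static int record_bpf_prog(struct perf_env *env,
			   struct bpf_prog_info_linear *info_linear)
{
	struct bpf_prog_info_node *info_node;

	info_node = malloc(sizeof(*info_node));
	if (!info_node)
		return -ENOMEM;

	info_node->info_linear = info_linear;
	/* Takes bpf_progs.lock for writing; a duplicate id is dropped with a pr_debug(). */
	perf_env__insert_bpf_prog_info(env, info_node);
	return 0;
}

static const char *lookup_prog_name(struct perf_env *env, __u32 prog_id)
{
	/* Takes bpf_progs.lock for reading; returns NULL when the id is unknown. */
	struct bpf_prog_info_node *node = perf_env__find_bpf_prog_info(env, prog_id);

	return node ? node->info_linear->info.name : NULL;
}
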
v4.17
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
#include <errno.h>
#include <sys/utsname.h>

struct perf_env perf_env;

void perf_env__exit(struct perf_env *env)
{
	int i;

	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);

	for (i = 0; i < env->nr_numa_nodes; i++)
		cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		free(env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	free(cache->type);
	free(cache->map);
	free(cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	struct utsname uts;
	char *arch_name;

	if (!env) { /* Assume local operation */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}
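
Compared with v5.9, this older perf_env__arch() calls uname(2) on every NULL-env invocation instead of caching the result in a static struct utsname, but the contract is the same: a NULL env means local operation. A minimal caller sketch, assuming perf's internal headers are on the include path; print_arch() is a hypothetical helper for illustration.

/* Usage sketch (not part of env.c): print the normalized architecture name. */
#include <stdio.h>
#include "env.h"

static void print_arch(struct perf_env *env)
{
	/* A NULL env means "assume local operation": perf_env__arch() falls back to uname(2). */
	const char *arch = perf_env__arch(env);

	printf("arch: %s\n", arch ? arch : "unknown");
}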