v5.4
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}
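
/*
 * Illustrative use, not part of the kernel source: a producer such as
 * perf's BPF side-band code allocates a node, attaches the info_linear
 * blob obtained from libbpf, and hands ownership to the env tree.
 * A minimal sketch, assuming "fd" is a valid BPF program fd:
 *
 *	struct bpf_prog_info_node *info_node = malloc(sizeof(*info_node));
 *
 *	if (info_node) {
 *		info_node->info_linear = bpf_program__get_prog_info_linear(fd, 0);
 *		if (info_node->info_linear)
 *			perf_env__insert_bpf_prog_info(&perf_env, info_node);
 *		else
 *			free(info_node);
 *	}
 */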

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}
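
/*
 * Illustrative lookup (sketch only): the returned node remains owned by
 * the tree, so the caller must not free it; nodes are reclaimed in bulk
 * by perf_env__purge_bpf():
 *
 *	struct bpf_prog_info_node *n;
 *
 *	n = perf_env__find_bpf_prog_info(&perf_env, prog_id);
 *	if (n)
 *		pr_debug("found bpf prog %u\n", n->info_linear->info.id);
 */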

void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}
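
/*
 * Illustrative call (sketch, not from this file): a tool records its
 * command line early, before option parsing rewrites argv:
 *
 *	if (perf_env__set_cmdline(&perf_env, argc, argv))
 *		pr_debug("out of memory recording the command line\n");
 */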

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}
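
/*
 * Illustrative consumer (sketch only): once the map is populated,
 * per-CPU topology is available through env->cpu[]:
 *
 *	if (!perf_env__read_cpu_topology_map(&perf_env)) {
 *		int cpu;
 *
 *		for (cpu = 0; cpu < perf_env.nr_cpus_avail; cpu++)
 *			pr_debug("cpu%d: socket %d die %d core %d\n", cpu,
 *				 perf_env.cpu[cpu].socket_id,
 *				 perf_env.cpu[cpu].die_id,
 *				 perf_env.cpu[cpu].core_id);
 *	}
 */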

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}
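
/*
 * Illustrative contrast (not from the source): on a typical Intel
 * machine perf_env__raw_arch() returns the uname string "x86_64",
 * while perf_env__arch() further down normalizes it to "x86".
 */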

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}
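
/*
 * Example mappings (derived from the checks above): "i386" and "i686"
 * normalize to "x86", "aarch64" to "arm64", "ppc64le" to "powerpc";
 * unrecognized names are returned unchanged.
 */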

const char *perf_env__arch(struct perf_env *env)
{
	struct utsname uts;
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include <bpf/libbpf.h>

void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT
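
/*
 * With libbpf compiled out, the #else stub above keeps perf_env__exit()
 * free of #ifdefs: its call below simply resolves to an empty function.
 */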

void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
		zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
		zfree(&env->hybrid_cpc_nodes[i].pmu_name);
	}
	zfree(&env->hybrid_cpc_nodes);
}

void perf_env__init(struct perf_env *env __maybe_unused)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
}

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}
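
/*
 * Note: unlike most helpers in this file, the allocation failure above
 * returns a positive ENOMEM; callers appear to treat any non-zero value
 * as failure, so the sign is harmless, if inconsistent.
 */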

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}
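
/*
 * Unlike the v5.4 version above, the local-operation path here caches
 * the uname() result in a static struct utsname, so repeated calls do
 * not re-issue the syscall.
 */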

int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
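
/*
 * Illustrative use (sketch): resolve the NUMA node for the CPU a sample
 * was taken on, where -1 means the CPU is not covered by any recorded
 * node. Assumes "sample" is a struct perf_sample with a valid cpu field:
 *
 *	int node = perf_env__numa_node(&perf_env, sample->cpu);
 */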