v4.6
 
  1#include "symbol.h"
  2#include <errno.h>
  3#include <inttypes.h>
  4#include <limits.h>
  5#include <stdlib.h>
  6#include <string.h>
  7#include <stdio.h>
  8#include <unistd.h>
  9#include "map.h"
 10#include "thread.h"
 11#include "strlist.h"
 12#include "vdso.h"
 13#include "build-id.h"
 14#include "util.h"
 15#include "debug.h"
 16#include "machine.h"
 17#include <linux/string.h>
 18
 19static void __maps__insert(struct maps *maps, struct map *map);
 20
 21const char *map_type__name[MAP__NR_TYPES] = {
 22	[MAP__FUNCTION] = "Functions",
 23	[MAP__VARIABLE] = "Variables",
 24};
 25
 26static inline int is_anon_memory(const char *filename)
 27{
 28	return !strcmp(filename, "//anon") ||
 29	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
 30	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
 31}
 32
 33static inline int is_no_dso_memory(const char *filename)
 34{
 35	return !strncmp(filename, "[stack", 6) ||
 36	       !strncmp(filename, "/SYSV",5)   ||
 37	       !strcmp(filename, "[heap]");
 38}
 39
 40static inline int is_android_lib(const char *filename)
 41{
 42	return !strncmp(filename, "/data/app-lib", 13) ||
 43	       !strncmp(filename, "/system/lib", 11);
 44}
 45
 46static inline bool replace_android_lib(const char *filename, char *newfilename)
 47{
 48	const char *libname;
 49	char *app_abi;
 50	size_t app_abi_length, new_length;
 51	size_t lib_length = 0;
 52
 53	libname  = strrchr(filename, '/');
 54	if (libname)
 55		lib_length = strlen(libname);
 56
 57	app_abi = getenv("APP_ABI");
 58	if (!app_abi)
 59		return false;
 60
 61	app_abi_length = strlen(app_abi);
 62
 63	if (!strncmp(filename, "/data/app-lib", 13)) {
 64		char *apk_path;
 65
 66		if (!app_abi_length)
 67			return false;
 68
 69		new_length = 7 + app_abi_length + lib_length;
 70
 71		apk_path = getenv("APK_PATH");
 72		if (apk_path) {
 73			new_length += strlen(apk_path) + 1;
 74			if (new_length > PATH_MAX)
 75				return false;
 76			snprintf(newfilename, new_length,
 77				 "%s/libs/%s/%s", apk_path, app_abi, libname);
 78		} else {
 79			if (new_length > PATH_MAX)
 80				return false;
 81			snprintf(newfilename, new_length,
 82				 "libs/%s/%s", app_abi, libname);
 83		}
 84		return true;
 85	}
 86
 87	if (!strncmp(filename, "/system/lib/", 11)) {
 88		char *ndk, *app;
 89		const char *arch;
 90		size_t ndk_length;
 91		size_t app_length;
 92
 93		ndk = getenv("NDK_ROOT");
 94		app = getenv("APP_PLATFORM");
 95
 96		if (!(ndk && app))
 97			return false;
 98
 99		ndk_length = strlen(ndk);
100		app_length = strlen(app);
101
102		if (!(ndk_length && app_length && app_abi_length))
103			return false;
104
105		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
106		       !strncmp(app_abi, "mips", 4) ? "mips" :
107		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
108
109		if (!arch)
110			return false;
111
112		new_length = 27 + ndk_length +
113			     app_length + lib_length
114			   + strlen(arch);
115
116		if (new_length > PATH_MAX)
117			return false;
118		snprintf(newfilename, new_length,
119			"%s/platforms/%s/arch-%s/usr/lib/%s",
120			ndk, app, arch, libname);
121
122		return true;
123	}
124	return false;
125}
126
127void map__init(struct map *map, enum map_type type,
128	       u64 start, u64 end, u64 pgoff, struct dso *dso)
129{
130	map->type     = type;
131	map->start    = start;
132	map->end      = end;
133	map->pgoff    = pgoff;
134	map->reloc    = 0;
135	map->dso      = dso__get(dso);
136	map->map_ip   = map__map_ip;
137	map->unmap_ip = map__unmap_ip;
138	RB_CLEAR_NODE(&map->rb_node);
139	map->groups   = NULL;
140	map->erange_warned = false;
141	atomic_set(&map->refcnt, 1);
142}
143
144struct map *map__new(struct machine *machine, u64 start, u64 len,
145		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
146		     u64 ino_gen, u32 prot, u32 flags, char *filename,
147		     enum map_type type, struct thread *thread)
148{
149	struct map *map = malloc(sizeof(*map));
150
151	if (map != NULL) {
152		char newfilename[PATH_MAX];
153		struct dso *dso;
154		int anon, no_dso, vdso, android;
155
156		android = is_android_lib(filename);
157		anon = is_anon_memory(filename);
158		vdso = is_vdso_map(filename);
159		no_dso = is_no_dso_memory(filename);
160
161		map->maj = d_maj;
162		map->min = d_min;
163		map->ino = ino;
164		map->ino_generation = ino_gen;
165		map->prot = prot;
166		map->flags = flags;
167
168		if ((anon || no_dso) && type == MAP__FUNCTION) {
169			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
170			filename = newfilename;
171		}
172
173		if (android) {
174			if (replace_android_lib(filename, newfilename))
175				filename = newfilename;
176		}
177
178		if (vdso) {
179			pgoff = 0;
180			dso = machine__findnew_vdso(machine, thread);
181		} else
182			dso = machine__findnew_dso(machine, filename);
183
184		if (dso == NULL)
185			goto out_delete;
186
187		map__init(map, type, start, start + len, pgoff, dso);
188
189		if (anon || no_dso) {
190			map->map_ip = map->unmap_ip = identity__map_ip;
191
192			/*
193			 * Set memory without DSO as loaded. All map__find_*
194			 * functions still return NULL, and we avoid the
195			 * unnecessary map__load warning.
196			 */
197			if (type != MAP__FUNCTION)
198				dso__set_loaded(dso, map->type);
199		}
200		dso__put(dso);
201	}
202	return map;
203out_delete:
204	free(map);
205	return NULL;
206}
207
208/*
209 * Constructor variant for modules (where we know from /proc/modules where
210 * they are loaded) and for vmlinux, where only after we load all the
211 * symbols we'll know where it starts and ends.
212 */
213struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
214{
215	struct map *map = calloc(1, (sizeof(*map) +
216				     (dso->kernel ? sizeof(struct kmap) : 0)));
217	if (map != NULL) {
218		/*
219		 * ->end will be filled after we load all the symbols
220		 */
221		map__init(map, type, start, 0, 0, dso);
222	}
223
224	return map;
225}
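/*
 * Layout note (illustrative, not part of the upstream file): map__new2()
 * over-allocates so that kernel maps carry their struct kmap directly behind
 * the struct map in the same calloc() block:
 *
 *     +--------------+---------------+
 *     |  struct map  |  struct kmap  |
 *     +--------------+---------------+
 *     ^ map           ^ (struct kmap *)(map + 1)
 *
 * This is what map__kmap() at the bottom of this file relies on, and why it
 * refuses to run on maps whose dso->kernel is not set.
 */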
226
227/*
228 * Use this and __map__is_kmodule() for map instances that are in
229 * machine->kmaps, and thus have map->groups->machine all properly set, to
230 * disambiguate between the kernel and modules.
231 *
 232 * When the need arises, introduce map__is_{kernel,kmodule}() that
233 * checks (map->groups != NULL && map->groups->machine != NULL &&
234 * map->dso->kernel) before calling __map__is_{kernel,kmodule}())
235 */
236bool __map__is_kernel(const struct map *map)
237{
238	return __machine__kernel_map(map->groups->machine, map->type) == map;
239}
240
241static void map__exit(struct map *map)
242{
243	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
244	dso__zput(map->dso);
245}
246
247void map__delete(struct map *map)
248{
249	map__exit(map);
250	free(map);
251}
252
253void map__put(struct map *map)
254{
255	if (map && atomic_dec_and_test(&map->refcnt))
256		map__delete(map);
257}
258
259void map__fixup_start(struct map *map)
260{
261	struct rb_root *symbols = &map->dso->symbols[map->type];
262	struct rb_node *nd = rb_first(symbols);
263	if (nd != NULL) {
264		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
265		map->start = sym->start;
266	}
267}
268
269void map__fixup_end(struct map *map)
270{
271	struct rb_root *symbols = &map->dso->symbols[map->type];
272	struct rb_node *nd = rb_last(symbols);
273	if (nd != NULL) {
274		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
275		map->end = sym->end;
276	}
277}
278
279#define DSO__DELETED "(deleted)"
280
281int map__load(struct map *map, symbol_filter_t filter)
282{
283	const char *name = map->dso->long_name;
284	int nr;
285
286	if (dso__loaded(map->dso, map->type))
287		return 0;
288
289	nr = dso__load(map->dso, map, filter);
290	if (nr < 0) {
291		if (map->dso->has_build_id) {
292			char sbuild_id[BUILD_ID_SIZE * 2 + 1];
293
294			build_id__sprintf(map->dso->build_id,
295					  sizeof(map->dso->build_id),
296					  sbuild_id);
297			pr_warning("%s with build id %s not found",
298				   name, sbuild_id);
299		} else
300			pr_warning("Failed to open %s", name);
301
302		pr_warning(", continuing without symbols\n");
303		return -1;
304	} else if (nr == 0) {
305#ifdef HAVE_LIBELF_SUPPORT
306		const size_t len = strlen(name);
307		const size_t real_len = len - sizeof(DSO__DELETED);
308
309		if (len > sizeof(DSO__DELETED) &&
310		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
311			pr_warning("%.*s was updated (is prelink enabled?). "
312				"Restart the long running apps that use it!\n",
313				   (int)real_len, name);
314		} else {
315			pr_warning("no symbols found in %s, maybe install "
316				   "a debug package?\n", name);
317		}
318#endif
319		return -1;
320	}
321
322	return 0;
323}
324
325int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
326{
327	return strcmp(namea, nameb);
328}
329
330struct symbol *map__find_symbol(struct map *map, u64 addr,
331				symbol_filter_t filter)
332{
333	if (map__load(map, filter) < 0)
334		return NULL;
335
336	return dso__find_symbol(map->dso, map->type, addr);
337}
338
339struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
340					symbol_filter_t filter)
341{
342	if (map__load(map, filter) < 0)
343		return NULL;
344
345	if (!dso__sorted_by_name(map->dso, map->type))
346		dso__sort_by_name(map->dso, map->type);
347
348	return dso__find_symbol_by_name(map->dso, map->type, name);
349}
350
351struct map *map__clone(struct map *from)
352{
353	struct map *map = memdup(from, sizeof(*map));
354
355	if (map != NULL) {
356		atomic_set(&map->refcnt, 1);
357		RB_CLEAR_NODE(&map->rb_node);
358		dso__get(map->dso);
359		map->groups = NULL;
360	}
361
362	return map;
363}
364
365int map__overlap(struct map *l, struct map *r)
366{
367	if (l->start > r->start) {
368		struct map *t = l;
369		l = r;
370		r = t;
371	}
372
373	if (l->end > r->start)
374		return 1;
375
376	return 0;
377}
378
379size_t map__fprintf(struct map *map, FILE *fp)
380{
381	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
382		       map->start, map->end, map->pgoff, map->dso->name);
383}
384
385size_t map__fprintf_dsoname(struct map *map, FILE *fp)
386{
387	const char *dsoname = "[unknown]";
388
389	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
390		if (symbol_conf.show_kernel_path && map->dso->long_name)
391			dsoname = map->dso->long_name;
392		else if (map->dso->name)
393			dsoname = map->dso->name;
394	}
395
396	return fprintf(fp, "%s", dsoname);
397}
398
399int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
400			 FILE *fp)
401{
402	char *srcline;
403	int ret = 0;
404
405	if (map && map->dso) {
406		srcline = get_srcline(map->dso,
407				      map__rip_2objdump(map, addr), NULL, true);
408		if (srcline != SRCLINE_UNKNOWN)
409			ret = fprintf(fp, "%s%s", prefix, srcline);
410		free_srcline(srcline);
411	}
412	return ret;
413}
414
415/**
416 * map__rip_2objdump - convert symbol start address to objdump address.
417 * @map: memory map
418 * @rip: symbol start address
419 *
420 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
421 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
422 * relative to section start.
423 *
424 * Return: Address suitable for passing to "objdump --start-address="
425 */
426u64 map__rip_2objdump(struct map *map, u64 rip)
427{
428	if (!map->dso->adjust_symbols)
429		return rip;
430
431	if (map->dso->rel)
432		return rip - map->pgoff;
433
434	return map->unmap_ip(map, rip) - map->reloc;
435}
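/*
 * Worked example (a sketch; it assumes the inline helpers from util/map.h,
 * i.e. map__map_ip(m, ip) == ip - m->start + m->pgoff and
 * map__unmap_ip(m, rip) == rip - m->pgoff + m->start):
 *
 *     start = 0x400000, pgoff = 0, reloc = 0, adjust_symbols = 1, rel = 0
 *     rip   = 0x1234              (a symbol start address in map-relative terms)
 *     map__rip_2objdump(map, rip)
 *         = map->unmap_ip(map, 0x1234) - 0 = 0x401234
 *
 * i.e. for the common adjusted, non-ET_REL case the result is simply the
 * absolute runtime address corrected by the relocation delta.
 */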
436
437/**
438 * map__objdump_2mem - convert objdump address to a memory address.
439 * @map: memory map
440 * @ip: objdump address
441 *
442 * Closely related to map__rip_2objdump(), this function takes an address from
443 * objdump and converts it to a memory address.  Note this assumes that @map
444 * contains the address.  To be sure the result is valid, check it forwards
445 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
446 *
447 * Return: Memory address.
448 */
449u64 map__objdump_2mem(struct map *map, u64 ip)
450{
451	if (!map->dso->adjust_symbols)
452		return map->unmap_ip(map, ip);
453
454	if (map->dso->rel)
455		return map->unmap_ip(map, ip + map->pgoff);
456
457	return ip + map->reloc;
458}
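/*
 * The round-trip check suggested in the comment above can be spelled out as a
 * small debug helper; this is a hypothetical sketch, not something perf
 * itself defines:
 */
#if 0
static bool map__objdump_addr_is_consistent(struct map *map, u64 objdump_addr)
{
	u64 mem = map__objdump_2mem(map, objdump_addr);

	/* only meaningful if @map really contains @objdump_addr */
	return map__rip_2objdump(map, map->map_ip(map, mem)) == objdump_addr;
}
#endif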
459
460static void maps__init(struct maps *maps)
461{
462	maps->entries = RB_ROOT;
463	pthread_rwlock_init(&maps->lock, NULL);
464}
465
466void map_groups__init(struct map_groups *mg, struct machine *machine)
467{
468	int i;
469	for (i = 0; i < MAP__NR_TYPES; ++i) {
470		maps__init(&mg->maps[i]);
471	}
472	mg->machine = machine;
473	atomic_set(&mg->refcnt, 1);
474}
475
476static void __maps__purge(struct maps *maps)
477{
478	struct rb_root *root = &maps->entries;
479	struct rb_node *next = rb_first(root);
480
481	while (next) {
482		struct map *pos = rb_entry(next, struct map, rb_node);
483
484		next = rb_next(&pos->rb_node);
485		rb_erase_init(&pos->rb_node, root);
486		map__put(pos);
487	}
488}
489
490static void maps__exit(struct maps *maps)
491{
492	pthread_rwlock_wrlock(&maps->lock);
493	__maps__purge(maps);
494	pthread_rwlock_unlock(&maps->lock);
495}
496
497void map_groups__exit(struct map_groups *mg)
498{
499	int i;
500
501	for (i = 0; i < MAP__NR_TYPES; ++i)
502		maps__exit(&mg->maps[i]);
503}
504
505bool map_groups__empty(struct map_groups *mg)
506{
507	int i;
508
509	for (i = 0; i < MAP__NR_TYPES; ++i) {
510		if (maps__first(&mg->maps[i]))
511			return false;
512	}
513
514	return true;
515}
516
517struct map_groups *map_groups__new(struct machine *machine)
518{
519	struct map_groups *mg = malloc(sizeof(*mg));
520
521	if (mg != NULL)
522		map_groups__init(mg, machine);
523
524	return mg;
525}
526
527void map_groups__delete(struct map_groups *mg)
528{
529	map_groups__exit(mg);
530	free(mg);
531}
532
533void map_groups__put(struct map_groups *mg)
534{
535	if (mg && atomic_dec_and_test(&mg->refcnt))
536		map_groups__delete(mg);
537}
538
539struct symbol *map_groups__find_symbol(struct map_groups *mg,
540				       enum map_type type, u64 addr,
541				       struct map **mapp,
542				       symbol_filter_t filter)
543{
544	struct map *map = map_groups__find(mg, type, addr);
545
546	/* Ensure map is loaded before using map->map_ip */
547	if (map != NULL && map__load(map, filter) >= 0) {
548		if (mapp != NULL)
549			*mapp = map;
550		return map__find_symbol(map, map->map_ip(map, addr), filter);
551	}
552
553	return NULL;
554}
555
556struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
557					 struct map **mapp, symbol_filter_t filter)
558{
559	struct symbol *sym;
560	struct rb_node *nd;
561
562	pthread_rwlock_rdlock(&maps->lock);
563
564	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
565		struct map *pos = rb_entry(nd, struct map, rb_node);
566
567		sym = map__find_symbol_by_name(pos, name, filter);
568
569		if (sym == NULL)
570			continue;
571		if (mapp != NULL)
572			*mapp = pos;
573		goto out;
574	}
575
576	sym = NULL;
577out:
578	pthread_rwlock_unlock(&maps->lock);
579	return sym;
580}
581
582struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
583					       enum map_type type,
584					       const char *name,
585					       struct map **mapp,
586					       symbol_filter_t filter)
587{
588	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
589
590	return sym;
591}
592
593int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
594{
595	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
596		if (ams->map->groups == NULL)
597			return -1;
598		ams->map = map_groups__find(ams->map->groups, ams->map->type,
599					    ams->addr);
600		if (ams->map == NULL)
601			return -1;
602	}
603
604	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
605	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
606
607	return ams->sym ? 0 : -1;
608}
609
610static size_t maps__fprintf(struct maps *maps, FILE *fp)
611{
612	size_t printed = 0;
613	struct rb_node *nd;
614
615	pthread_rwlock_rdlock(&maps->lock);
616
617	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
618		struct map *pos = rb_entry(nd, struct map, rb_node);
619		printed += fprintf(fp, "Map:");
620		printed += map__fprintf(pos, fp);
621		if (verbose > 2) {
622			printed += dso__fprintf(pos->dso, pos->type, fp);
623			printed += fprintf(fp, "--\n");
624		}
625	}
626
627	pthread_rwlock_unlock(&maps->lock);
628
629	return printed;
630}
631
632size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
633				  FILE *fp)
634{
635	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
636	return printed += maps__fprintf(&mg->maps[type], fp);
637}
638
639size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
640{
641	size_t printed = 0, i;
642	for (i = 0; i < MAP__NR_TYPES; ++i)
643		printed += __map_groups__fprintf_maps(mg, i, fp);
644	return printed;
645}
646
647static void __map_groups__insert(struct map_groups *mg, struct map *map)
648{
649	__maps__insert(&mg->maps[map->type], map);
650	map->groups = mg;
651}
652
653static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
654{
655	struct rb_root *root;
656	struct rb_node *next;
657	int err = 0;
658
659	pthread_rwlock_wrlock(&maps->lock);
660
661	root = &maps->entries;
662	next = rb_first(root);
663
664	while (next) {
665		struct map *pos = rb_entry(next, struct map, rb_node);
666		next = rb_next(&pos->rb_node);
667
668		if (!map__overlap(pos, map))
669			continue;
670
671		if (verbose >= 2) {
672			fputs("overlapping maps:\n", fp);
673			map__fprintf(map, fp);
674			map__fprintf(pos, fp);
675		}
676
677		rb_erase_init(&pos->rb_node, root);
678		/*
679		 * Now check if we need to create new maps for areas not
680		 * overlapped by the new map:
681		 */
682		if (map->start > pos->start) {
683			struct map *before = map__clone(pos);
684
685			if (before == NULL) {
686				err = -ENOMEM;
687				goto put_map;
688			}
689
690			before->end = map->start;
691			__map_groups__insert(pos->groups, before);
692			if (verbose >= 2)
693				map__fprintf(before, fp);
694			map__put(before);
695		}
696
697		if (map->end < pos->end) {
698			struct map *after = map__clone(pos);
699
700			if (after == NULL) {
701				err = -ENOMEM;
702				goto put_map;
703			}
704
705			after->start = map->end;
706			__map_groups__insert(pos->groups, after);
707			if (verbose >= 2)
708				map__fprintf(after, fp);
709			map__put(after);
710		}
711put_map:
712		map__put(pos);
713
714		if (err)
715			goto out;
716	}
717
718	err = 0;
719out:
720	pthread_rwlock_unlock(&maps->lock);
721	return err;
722}
723
724int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
725				   FILE *fp)
726{
727	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
728}
729
730/*
 731 * XXX This should not really _copy_ the maps, but refcount them.
732 */
733int map_groups__clone(struct map_groups *mg,
734		      struct map_groups *parent, enum map_type type)
735{
736	int err = -ENOMEM;
737	struct map *map;
738	struct maps *maps = &parent->maps[type];
739
740	pthread_rwlock_rdlock(&maps->lock);
741
742	for (map = maps__first(maps); map; map = map__next(map)) {
743		struct map *new = map__clone(map);
744		if (new == NULL)
745			goto out_unlock;
746		map_groups__insert(mg, new);
747		map__put(new);
748	}
749
750	err = 0;
751out_unlock:
752	pthread_rwlock_unlock(&maps->lock);
753	return err;
754}
755
756static void __maps__insert(struct maps *maps, struct map *map)
757{
758	struct rb_node **p = &maps->entries.rb_node;
759	struct rb_node *parent = NULL;
760	const u64 ip = map->start;
761	struct map *m;
762
763	while (*p != NULL) {
764		parent = *p;
765		m = rb_entry(parent, struct map, rb_node);
766		if (ip < m->start)
767			p = &(*p)->rb_left;
768		else
769			p = &(*p)->rb_right;
770	}
771
772	rb_link_node(&map->rb_node, parent, p);
773	rb_insert_color(&map->rb_node, &maps->entries);
774	map__get(map);
775}
776
777void maps__insert(struct maps *maps, struct map *map)
778{
779	pthread_rwlock_wrlock(&maps->lock);
780	__maps__insert(maps, map);
781	pthread_rwlock_unlock(&maps->lock);
782}
783
784static void __maps__remove(struct maps *maps, struct map *map)
785{
786	rb_erase_init(&map->rb_node, &maps->entries);
787	map__put(map);
788}
789
790void maps__remove(struct maps *maps, struct map *map)
791{
792	pthread_rwlock_wrlock(&maps->lock);
793	__maps__remove(maps, map);
794	pthread_rwlock_unlock(&maps->lock);
795}
796
797struct map *maps__find(struct maps *maps, u64 ip)
798{
799	struct rb_node **p, *parent = NULL;
800	struct map *m;
801
802	pthread_rwlock_rdlock(&maps->lock);
803
804	p = &maps->entries.rb_node;
805	while (*p != NULL) {
806		parent = *p;
807		m = rb_entry(parent, struct map, rb_node);
808		if (ip < m->start)
809			p = &(*p)->rb_left;
810		else if (ip >= m->end)
811			p = &(*p)->rb_right;
812		else
813			goto out;
814	}
815
816	m = NULL;
817out:
818	pthread_rwlock_unlock(&maps->lock);
819	return m;
820}
821
822struct map *maps__first(struct maps *maps)
823{
824	struct rb_node *first = rb_first(&maps->entries);
825
826	if (first)
827		return rb_entry(first, struct map, rb_node);
828	return NULL;
829}
830
831struct map *map__next(struct map *map)
832{
833	struct rb_node *next = rb_next(&map->rb_node);
834
835	if (next)
836		return rb_entry(next, struct map, rb_node);
837	return NULL;
838}
839
840struct kmap *map__kmap(struct map *map)
841{
842	if (!map->dso || !map->dso->kernel) {
843		pr_err("Internal error: map__kmap with a non-kernel map\n");
844		return NULL;
845	}
846	return (struct kmap *)(map + 1);
847}
848
849struct map_groups *map__kmaps(struct map *map)
850{
851	struct kmap *kmap = map__kmap(map);
852
853	if (!kmap || !kmap->kmaps) {
854		pr_err("Internal error: map__kmaps with a non-kernel map\n");
855		return NULL;
856	}
857	return kmap->kmaps;
858}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#include "symbol.h"
   3#include <assert.h>
   4#include <errno.h>
   5#include <inttypes.h>
   6#include <limits.h>
   7#include <stdlib.h>
   8#include <string.h>
   9#include <stdio.h>
  10#include <unistd.h>
  11#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  12#include "dso.h"
  13#include "map.h"
  14#include "map_symbol.h"
  15#include "thread.h"
  16#include "vdso.h"
  17#include "build-id.h"
  18#include "debug.h"
  19#include "machine.h"
  20#include <linux/string.h>
  21#include <linux/zalloc.h>
  22#include "srcline.h"
  23#include "namespaces.h"
  24#include "unwind.h"
  25#include "srccode.h"
  26#include "ui/ui.h"
  27
  28static void __maps__insert(struct maps *maps, struct map *map);
  29static void __maps__insert_name(struct maps *maps, struct map *map);
  30
  31static inline int is_anon_memory(const char *filename, u32 flags)
  32{
  33	return flags & MAP_HUGETLB ||
  34	       !strcmp(filename, "//anon") ||
  35	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
  36	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
  37}
  38
  39static inline int is_no_dso_memory(const char *filename)
  40{
  41	return !strncmp(filename, "[stack", 6) ||
  42	       !strncmp(filename, "/SYSV",5)   ||
  43	       !strcmp(filename, "[heap]");
  44}
  45
  46static inline int is_android_lib(const char *filename)
  47{
  48	return !strncmp(filename, "/data/app-lib", 13) ||
  49	       !strncmp(filename, "/system/lib", 11);
  50}
  51
  52static inline bool replace_android_lib(const char *filename, char *newfilename)
  53{
  54	const char *libname;
  55	char *app_abi;
  56	size_t app_abi_length, new_length;
  57	size_t lib_length = 0;
  58
  59	libname  = strrchr(filename, '/');
  60	if (libname)
  61		lib_length = strlen(libname);
  62
  63	app_abi = getenv("APP_ABI");
  64	if (!app_abi)
  65		return false;
  66
  67	app_abi_length = strlen(app_abi);
  68
  69	if (!strncmp(filename, "/data/app-lib", 13)) {
  70		char *apk_path;
  71
  72		if (!app_abi_length)
  73			return false;
  74
  75		new_length = 7 + app_abi_length + lib_length;
  76
  77		apk_path = getenv("APK_PATH");
  78		if (apk_path) {
  79			new_length += strlen(apk_path) + 1;
  80			if (new_length > PATH_MAX)
  81				return false;
  82			snprintf(newfilename, new_length,
  83				 "%s/libs/%s/%s", apk_path, app_abi, libname);
  84		} else {
  85			if (new_length > PATH_MAX)
  86				return false;
  87			snprintf(newfilename, new_length,
  88				 "libs/%s/%s", app_abi, libname);
  89		}
  90		return true;
  91	}
  92
  93	if (!strncmp(filename, "/system/lib/", 11)) {
  94		char *ndk, *app;
  95		const char *arch;
  96		size_t ndk_length;
  97		size_t app_length;
  98
  99		ndk = getenv("NDK_ROOT");
 100		app = getenv("APP_PLATFORM");
 101
 102		if (!(ndk && app))
 103			return false;
 104
 105		ndk_length = strlen(ndk);
 106		app_length = strlen(app);
 107
 108		if (!(ndk_length && app_length && app_abi_length))
 109			return false;
 110
 111		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
 112		       !strncmp(app_abi, "mips", 4) ? "mips" :
 113		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
 114
 115		if (!arch)
 116			return false;
 117
 118		new_length = 27 + ndk_length +
 119			     app_length + lib_length
 120			   + strlen(arch);
 121
 122		if (new_length > PATH_MAX)
 123			return false;
 124		snprintf(newfilename, new_length,
 125			"%s/platforms/%s/arch-%s/usr/lib/%s",
 126			ndk, app, arch, libname);
 127
 128		return true;
 129	}
 130	return false;
 131}
 132
 133void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
 134{
 135	map->start    = start;
 136	map->end      = end;
 137	map->pgoff    = pgoff;
 138	map->reloc    = 0;
 139	map->dso      = dso__get(dso);
 140	map->map_ip   = map__map_ip;
 141	map->unmap_ip = map__unmap_ip;
 142	RB_CLEAR_NODE(&map->rb_node);
 143	map->groups   = NULL;
 144	map->erange_warned = false;
 145	refcount_set(&map->refcnt, 1);
 146}
 147
 148struct map *map__new(struct machine *machine, u64 start, u64 len,
 149		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
 150		     u64 ino_gen, u32 prot, u32 flags, char *filename,
 151		     struct thread *thread)
 152{
 153	struct map *map = malloc(sizeof(*map));
 154	struct nsinfo *nsi = NULL;
 155	struct nsinfo *nnsi;
 156
 157	if (map != NULL) {
 158		char newfilename[PATH_MAX];
 159		struct dso *dso;
 160		int anon, no_dso, vdso, android;
 161
 162		android = is_android_lib(filename);
 163		anon = is_anon_memory(filename, flags);
 164		vdso = is_vdso_map(filename);
 165		no_dso = is_no_dso_memory(filename);
 166
 167		map->maj = d_maj;
 168		map->min = d_min;
 169		map->ino = ino;
 170		map->ino_generation = ino_gen;
 171		map->prot = prot;
 172		map->flags = flags;
 173		nsi = nsinfo__get(thread->nsinfo);
 174
 175		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
 176			snprintf(newfilename, sizeof(newfilename),
 177				 "/tmp/perf-%d.map", nsi->pid);
 178			filename = newfilename;
 179		}
 180
 181		if (android) {
 182			if (replace_android_lib(filename, newfilename))
 183				filename = newfilename;
 184		}
 185
 186		if (vdso) {
 187			/* The vdso maps are always on the host and not the
 188			 * container.  Ensure that we don't use setns to look
 189			 * them up.
 190			 */
 191			nnsi = nsinfo__copy(nsi);
 192			if (nnsi) {
 193				nsinfo__put(nsi);
 194				nnsi->need_setns = false;
 195				nsi = nnsi;
 196			}
 197			pgoff = 0;
 198			dso = machine__findnew_vdso(machine, thread);
 199		} else
 200			dso = machine__findnew_dso(machine, filename);
 201
 202		if (dso == NULL)
 203			goto out_delete;
 204
 205		map__init(map, start, start + len, pgoff, dso);
 206
 207		if (anon || no_dso) {
 208			map->map_ip = map->unmap_ip = identity__map_ip;
 209
 210			/*
 211			 * Set memory without DSO as loaded. All map__find_*
 212			 * functions still return NULL, and we avoid the
 213			 * unnecessary map__load warning.
 214			 */
 215			if (!(prot & PROT_EXEC))
 216				dso__set_loaded(dso);
 217		}
 218		dso->nsinfo = nsi;
 219		dso__put(dso);
 220	}
 221	return map;
 222out_delete:
 223	nsinfo__put(nsi);
 224	free(map);
 225	return NULL;
 226}
 227
 228/*
 229 * Constructor variant for modules (where we know from /proc/modules where
 230 * they are loaded) and for vmlinux, where only after we load all the
 231 * symbols we'll know where it starts and ends.
 232 */
 233struct map *map__new2(u64 start, struct dso *dso)
 234{
 235	struct map *map = calloc(1, (sizeof(*map) +
 236				     (dso->kernel ? sizeof(struct kmap) : 0)));
 237	if (map != NULL) {
 238		/*
 239		 * ->end will be filled after we load all the symbols
 240		 */
 241		map__init(map, start, 0, 0, dso);
 242	}
 243
 244	return map;
 245}
 246
 247/*
 248 * Use this and __map__is_kmodule() for map instances that are in
 249 * machine->kmaps, and thus have map->groups->machine all properly set, to
 250 * disambiguate between the kernel and modules.
 251 *
  252 * When the need arises, introduce map__is_{kernel,kmodule}() that
 253 * checks (map->groups != NULL && map->groups->machine != NULL &&
 254 * map->dso->kernel) before calling __map__is_{kernel,kmodule}())
 255 */
 256bool __map__is_kernel(const struct map *map)
 257{
 258	return machine__kernel_map(map->groups->machine) == map;
 259}
 260
 261bool __map__is_extra_kernel_map(const struct map *map)
 262{
 263	struct kmap *kmap = __map__kmap((struct map *)map);
 264
 265	return kmap && kmap->name[0];
 266}
 267
 268bool __map__is_bpf_prog(const struct map *map)
 269{
 270	const char *name;
 271
 272	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
 273		return true;
 274
 275	/*
 276	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
 277	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
 278	 * guess the type based on name.
 279	 */
 280	name = map->dso->short_name;
 281	return name && (strstr(name, "bpf_prog_") == name);
 282}
 283
 284bool map__has_symbols(const struct map *map)
 285{
 286	return dso__has_symbols(map->dso);
 287}
 288
 289static void map__exit(struct map *map)
 290{
 291	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
 292	dso__zput(map->dso);
 293}
 294
 295void map__delete(struct map *map)
 296{
 297	map__exit(map);
 298	free(map);
 299}
 300
 301void map__put(struct map *map)
 302{
 303	if (map && refcount_dec_and_test(&map->refcnt))
 304		map__delete(map);
 305}
 306
 307void map__fixup_start(struct map *map)
 308{
 309	struct rb_root_cached *symbols = &map->dso->symbols;
 310	struct rb_node *nd = rb_first_cached(symbols);
 311	if (nd != NULL) {
 312		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
 313		map->start = sym->start;
 314	}
 315}
 316
 317void map__fixup_end(struct map *map)
 318{
 319	struct rb_root_cached *symbols = &map->dso->symbols;
 320	struct rb_node *nd = rb_last(&symbols->rb_root);
 321	if (nd != NULL) {
 322		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
 323		map->end = sym->end;
 324	}
 325}
 326
 327#define DSO__DELETED "(deleted)"
 328
 329int map__load(struct map *map)
 330{
 331	const char *name = map->dso->long_name;
 332	int nr;
 333
 334	if (dso__loaded(map->dso))
 335		return 0;
 336
 337	nr = dso__load(map->dso, map);
 338	if (nr < 0) {
 339		if (map->dso->has_build_id) {
 340			char sbuild_id[SBUILD_ID_SIZE];
 341
 342			build_id__sprintf(map->dso->build_id,
 343					  sizeof(map->dso->build_id),
 344					  sbuild_id);
 345			pr_debug("%s with build id %s not found", name, sbuild_id);
 346		} else
 347			pr_debug("Failed to open %s", name);
 348
 349		pr_debug(", continuing without symbols\n");
 350		return -1;
 351	} else if (nr == 0) {
 352#ifdef HAVE_LIBELF_SUPPORT
 353		const size_t len = strlen(name);
 354		const size_t real_len = len - sizeof(DSO__DELETED);
 355
 356		if (len > sizeof(DSO__DELETED) &&
 357		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
 358			pr_debug("%.*s was updated (is prelink enabled?). "
 359				"Restart the long running apps that use it!\n",
 360				   (int)real_len, name);
 361		} else {
 362			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
 363		}
 364#endif
 365		return -1;
 366	}
 367
 368	return 0;
 369}
 370
 371struct symbol *map__find_symbol(struct map *map, u64 addr)
 372{
 373	if (map__load(map) < 0)
 374		return NULL;
 375
 376	return dso__find_symbol(map->dso, addr);
 377}
 378
 379struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
 380{
 381	if (map__load(map) < 0)
 382		return NULL;
 383
 384	if (!dso__sorted_by_name(map->dso))
 385		dso__sort_by_name(map->dso);
 386
 387	return dso__find_symbol_by_name(map->dso, name);
 388}
 389
 390struct map *map__clone(struct map *from)
 391{
 392	struct map *map = memdup(from, sizeof(*map));
 393
 394	if (map != NULL) {
 395		refcount_set(&map->refcnt, 1);
 396		RB_CLEAR_NODE(&map->rb_node);
 397		dso__get(map->dso);
 398		map->groups = NULL;
 399	}
 400
 401	return map;
 402}
 403
 404size_t map__fprintf(struct map *map, FILE *fp)
 405{
 406	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
 407		       map->start, map->end, map->pgoff, map->dso->name);
 408}
 409
 410size_t map__fprintf_dsoname(struct map *map, FILE *fp)
 411{
 412	char buf[symbol_conf.pad_output_len_dso + 1];
 413	const char *dsoname = "[unknown]";
 414
 415	if (map && map->dso) {
 416		if (symbol_conf.show_kernel_path && map->dso->long_name)
 417			dsoname = map->dso->long_name;
 418		else
 419			dsoname = map->dso->name;
 420	}
 421
 422	if (symbol_conf.pad_output_len_dso) {
 423		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
 424		dsoname = buf;
 425	}
 426
 427	return fprintf(fp, "%s", dsoname);
 428}
 429
 430char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
 431{
 432	if (map == NULL)
 433		return SRCLINE_UNKNOWN;
 434	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
 435}
 436
 437int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
 438			 FILE *fp)
 439{
 440	int ret = 0;
 441
 442	if (map && map->dso) {
 443		char *srcline = map__srcline(map, addr, NULL);
 444		if (srcline != SRCLINE_UNKNOWN)
 445			ret = fprintf(fp, "%s%s", prefix, srcline);
 446		free_srcline(srcline);
 447	}
 448	return ret;
 449}
 450
 451int map__fprintf_srccode(struct map *map, u64 addr,
 452			 FILE *fp,
 453			 struct srccode_state *state)
 454{
 455	char *srcfile;
 456	int ret = 0;
 457	unsigned line;
 458	int len;
 459	char *srccode;
 460
 461	if (!map || !map->dso)
 462		return 0;
 463	srcfile = get_srcline_split(map->dso,
 464				    map__rip_2objdump(map, addr),
 465				    &line);
 466	if (!srcfile)
 467		return 0;
 468
 469	/* Avoid redundant printing */
 470	if (state &&
 471	    state->srcfile &&
 472	    !strcmp(state->srcfile, srcfile) &&
 473	    state->line == line) {
 474		free(srcfile);
 475		return 0;
 476	}
 477
 478	srccode = find_sourceline(srcfile, line, &len);
 479	if (!srccode)
 480		goto out_free_line;
 481
 482	ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
 483
 484	if (state) {
 485		state->srcfile = srcfile;
 486		state->line = line;
 487	}
 488	return ret;
 489
 490out_free_line:
 491	free(srcfile);
 492	return ret;
 493}
 494
 495
 496void srccode_state_free(struct srccode_state *state)
 497{
 498	zfree(&state->srcfile);
 499	state->line = 0;
 500}
 501
 502/**
 503 * map__rip_2objdump - convert symbol start address to objdump address.
 504 * @map: memory map
 505 * @rip: symbol start address
 506 *
 507 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 508 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 509 * relative to section start.
 510 *
 511 * Return: Address suitable for passing to "objdump --start-address="
 512 */
 513u64 map__rip_2objdump(struct map *map, u64 rip)
 514{
 515	struct kmap *kmap = __map__kmap(map);
 516
 517	/*
 518	 * vmlinux does not have program headers for PTI entry trampolines and
 519	 * kcore may not either. However the trampoline object code is on the
 520	 * main kernel map, so just use that instead.
 521	 */
 522	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
 523		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
 524
 525		if (kernel_map)
 526			map = kernel_map;
 527	}
 528
 529	if (!map->dso->adjust_symbols)
 530		return rip;
 531
 532	if (map->dso->rel)
 533		return rip - map->pgoff;
 534
 535	/*
 536	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
 537	 * but all kernel modules are ET_REL, so won't get here.
 538	 */
 539	if (map->dso->kernel == DSO_TYPE_USER)
 540		return rip + map->dso->text_offset;
 541
 542	return map->unmap_ip(map, rip) - map->reloc;
 543}
 544
 545/**
 546 * map__objdump_2mem - convert objdump address to a memory address.
 547 * @map: memory map
 548 * @ip: objdump address
 549 *
 550 * Closely related to map__rip_2objdump(), this function takes an address from
 551 * objdump and converts it to a memory address.  Note this assumes that @map
 552 * contains the address.  To be sure the result is valid, check it forwards
 553 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 554 *
 555 * Return: Memory address.
 556 */
 557u64 map__objdump_2mem(struct map *map, u64 ip)
 558{
 559	if (!map->dso->adjust_symbols)
 560		return map->unmap_ip(map, ip);
 561
 562	if (map->dso->rel)
 563		return map->unmap_ip(map, ip + map->pgoff);
 564
 565	/*
 566	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
 567	 * but all kernel modules are ET_REL, so won't get here.
 568	 */
 569	if (map->dso->kernel == DSO_TYPE_USER)
 570		return map->unmap_ip(map, ip - map->dso->text_offset);
 571
 572	return ip + map->reloc;
 573}
 574
 575static void maps__init(struct maps *maps)
 576{
 577	maps->entries = RB_ROOT;
 578	maps->names = RB_ROOT;
 579	init_rwsem(&maps->lock);
 580}
 581
 582void map_groups__init(struct map_groups *mg, struct machine *machine)
 583{
 584	maps__init(&mg->maps);
 585	mg->machine = machine;
 586	refcount_set(&mg->refcnt, 1);
 587}
 588
 589void map_groups__insert(struct map_groups *mg, struct map *map)
 590{
 591	maps__insert(&mg->maps, map);
 592	map->groups = mg;
 593}
 594
 595static void __maps__purge(struct maps *maps)
 596{
 597	struct rb_root *root = &maps->entries;
 598	struct rb_node *next = rb_first(root);
 599
 600	while (next) {
 601		struct map *pos = rb_entry(next, struct map, rb_node);
 602
 603		next = rb_next(&pos->rb_node);
 604		rb_erase_init(&pos->rb_node, root);
 605		map__put(pos);
 606	}
 607}
 608
 609static void __maps__purge_names(struct maps *maps)
 610{
 611	struct rb_root *root = &maps->names;
 612	struct rb_node *next = rb_first(root);
 613
 614	while (next) {
 615		struct map *pos = rb_entry(next, struct map, rb_node_name);
 616
 617		next = rb_next(&pos->rb_node_name);
 618		rb_erase_init(&pos->rb_node_name, root);
 619		map__put(pos);
 620	}
 621}
 622
 623static void maps__exit(struct maps *maps)
 624{
 625	down_write(&maps->lock);
 626	__maps__purge(maps);
 627	__maps__purge_names(maps);
 628	up_write(&maps->lock);
 629}
 630
 631void map_groups__exit(struct map_groups *mg)
 632{
 633	maps__exit(&mg->maps);
 634}
 635
 636bool map_groups__empty(struct map_groups *mg)
 637{
 638	return !maps__first(&mg->maps);
 639}
 640
 641struct map_groups *map_groups__new(struct machine *machine)
 642{
 643	struct map_groups *mg = zalloc(sizeof(*mg));
 644
 645	if (mg != NULL)
 646		map_groups__init(mg, machine);
 647
 648	return mg;
 649}
 650
 651void map_groups__delete(struct map_groups *mg)
 652{
 653	map_groups__exit(mg);
 654	unwind__finish_access(mg);
 655	free(mg);
 656}
 657
 658void map_groups__put(struct map_groups *mg)
 659{
 660	if (mg && refcount_dec_and_test(&mg->refcnt))
 661		map_groups__delete(mg);
 662}
 663
 664struct symbol *map_groups__find_symbol(struct map_groups *mg,
 665				       u64 addr, struct map **mapp)
 666{
 667	struct map *map = map_groups__find(mg, addr);
 668
 669	/* Ensure map is loaded before using map->map_ip */
 670	if (map != NULL && map__load(map) >= 0) {
 671		if (mapp != NULL)
 672			*mapp = map;
 673		return map__find_symbol(map, map->map_ip(map, addr));
 674	}
 675
 676	return NULL;
 677}
 678
 679static bool map__contains_symbol(struct map *map, struct symbol *sym)
 680{
 681	u64 ip = map->unmap_ip(map, sym->start);
 682
 683	return ip >= map->start && ip < map->end;
 684}
 685
 686struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 687					 struct map **mapp)
 688{
 689	struct symbol *sym;
 690	struct rb_node *nd;
 691
 692	down_read(&maps->lock);
 693
 694	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
 695		struct map *pos = rb_entry(nd, struct map, rb_node);
 696
 697		sym = map__find_symbol_by_name(pos, name);
 698
 699		if (sym == NULL)
 700			continue;
 701		if (!map__contains_symbol(pos, sym)) {
 702			sym = NULL;
 703			continue;
 704		}
 705		if (mapp != NULL)
 706			*mapp = pos;
 707		goto out;
 708	}
 709
 710	sym = NULL;
 711out:
 712	up_read(&maps->lock);
 713	return sym;
 714}
 715
 716struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
 717					       const char *name,
 718					       struct map **mapp)
 719{
 720	return maps__find_symbol_by_name(&mg->maps, name, mapp);
 721}
 722
 723int map_groups__find_ams(struct addr_map_symbol *ams)
 724{
 725	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
 726		if (ams->map->groups == NULL)
 727			return -1;
 728		ams->map = map_groups__find(ams->map->groups, ams->addr);
 729		if (ams->map == NULL)
 730			return -1;
 731	}
 732
 733	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
 734	ams->sym = map__find_symbol(ams->map, ams->al_addr);
 735
 736	return ams->sym ? 0 : -1;
 737}
 738
 739static size_t maps__fprintf(struct maps *maps, FILE *fp)
 740{
 741	size_t printed = 0;
 742	struct rb_node *nd;
 743
 744	down_read(&maps->lock);
 745
 746	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
 747		struct map *pos = rb_entry(nd, struct map, rb_node);
 748		printed += fprintf(fp, "Map:");
 749		printed += map__fprintf(pos, fp);
 750		if (verbose > 2) {
 751			printed += dso__fprintf(pos->dso, fp);
 752			printed += fprintf(fp, "--\n");
 753		}
 754	}
 755
 756	up_read(&maps->lock);
 757
 758	return printed;
 759}
 760
 761size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 762{
 763	return maps__fprintf(&mg->maps, fp);
 764}
 765
 766static void __map_groups__insert(struct map_groups *mg, struct map *map)
 767{
 768	__maps__insert(&mg->maps, map);
 769	__maps__insert_name(&mg->maps, map);
 770	map->groups = mg;
 771}
 772
 773static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 774{
 775	struct rb_root *root;
 776	struct rb_node *next, *first;
 777	int err = 0;
 778
 779	down_write(&maps->lock);
 780
 781	root = &maps->entries;
 782
 783	/*
 784	 * Find first map where end > map->start.
 785	 * Same as find_vma() in kernel.
 786	 */
 787	next = root->rb_node;
 788	first = NULL;
 789	while (next) {
 790		struct map *pos = rb_entry(next, struct map, rb_node);
 791
 792		if (pos->end > map->start) {
 793			first = next;
 794			if (pos->start <= map->start)
 795				break;
 796			next = next->rb_left;
 797		} else
 798			next = next->rb_right;
 799	}
 800
 801	next = first;
 802	while (next) {
 803		struct map *pos = rb_entry(next, struct map, rb_node);
 804		next = rb_next(&pos->rb_node);
 805
 806		/*
 807		 * Stop if current map starts after map->end.
 808		 * Maps are ordered by start: next will not overlap for sure.
 809		 */
 810		if (pos->start >= map->end)
 811			break;
 812
 813		if (verbose >= 2) {
 814
 815			if (use_browser) {
 816				pr_debug("overlapping maps in %s (disable tui for more info)\n",
 817					   map->dso->name);
 818			} else {
 819				fputs("overlapping maps:\n", fp);
 820				map__fprintf(map, fp);
 821				map__fprintf(pos, fp);
 822			}
 823		}
 824
 825		rb_erase_init(&pos->rb_node, root);
 826		/*
 827		 * Now check if we need to create new maps for areas not
 828		 * overlapped by the new map:
 829		 */
 830		if (map->start > pos->start) {
 831			struct map *before = map__clone(pos);
 832
 833			if (before == NULL) {
 834				err = -ENOMEM;
 835				goto put_map;
 836			}
 837
 838			before->end = map->start;
 839			__map_groups__insert(pos->groups, before);
 840			if (verbose >= 2 && !use_browser)
 841				map__fprintf(before, fp);
 842			map__put(before);
 843		}
 844
 845		if (map->end < pos->end) {
 846			struct map *after = map__clone(pos);
 847
 848			if (after == NULL) {
 849				err = -ENOMEM;
 850				goto put_map;
 851			}
 852
 853			after->start = map->end;
 854			after->pgoff += map->end - pos->start;
 855			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
 856			__map_groups__insert(pos->groups, after);
 857			if (verbose >= 2 && !use_browser)
 858				map__fprintf(after, fp);
 859			map__put(after);
 860		}
 861put_map:
 862		map__put(pos);
 863
 864		if (err)
 865			goto out;
 866	}
 867
 868	err = 0;
 869out:
 870	up_write(&maps->lock);
 871	return err;
 872}
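/*
 * Why the pgoff adjustment on the trailing split matters (illustrative,
 * assuming map__map_ip(m, ip) == ip - m->start + m->pgoff): say pos covered
 * [0x1000, 0x9000) with pgoff 0 and the new map ends at 0x3000.  "after"
 * then starts at 0x3000; without bumping its pgoff by map->end - pos->start
 * (0x2000 here), map_ip(after, 0x3000) would change from 0x2000 to 0 and
 * symbols in the split-off tail would resolve to the wrong file offsets.
 * The assert() above checks exactly this invariant at the split point.
 */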
 873
 874int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 875				   FILE *fp)
 876{
 877	return maps__fixup_overlappings(&mg->maps, map, fp);
 878}
 879
 880/*
  881 * XXX This should not really _copy_ the maps, but refcount them.
 882 */
 883int map_groups__clone(struct thread *thread, struct map_groups *parent)
 884{
 885	struct map_groups *mg = thread->mg;
 886	int err = -ENOMEM;
 887	struct map *map;
 888	struct maps *maps = &parent->maps;
 889
 890	down_read(&maps->lock);
 891
 892	for (map = maps__first(maps); map; map = map__next(map)) {
 893		struct map *new = map__clone(map);
 894		if (new == NULL)
 895			goto out_unlock;
 896
 897		err = unwind__prepare_access(mg, new, NULL);
 898		if (err)
 899			goto out_unlock;
 900
 901		map_groups__insert(mg, new);
 902		map__put(new);
 903	}
 904
 905	err = 0;
 906out_unlock:
 907	up_read(&maps->lock);
 908	return err;
 909}
 910
 911static void __maps__insert(struct maps *maps, struct map *map)
 912{
 913	struct rb_node **p = &maps->entries.rb_node;
 914	struct rb_node *parent = NULL;
 915	const u64 ip = map->start;
 916	struct map *m;
 917
 918	while (*p != NULL) {
 919		parent = *p;
 920		m = rb_entry(parent, struct map, rb_node);
 921		if (ip < m->start)
 922			p = &(*p)->rb_left;
 923		else
 924			p = &(*p)->rb_right;
 925	}
 926
 927	rb_link_node(&map->rb_node, parent, p);
 928	rb_insert_color(&map->rb_node, &maps->entries);
 929	map__get(map);
 930}
 931
 932static void __maps__insert_name(struct maps *maps, struct map *map)
 933{
 934	struct rb_node **p = &maps->names.rb_node;
 935	struct rb_node *parent = NULL;
 936	struct map *m;
 937	int rc;
 938
 939	while (*p != NULL) {
 940		parent = *p;
 941		m = rb_entry(parent, struct map, rb_node_name);
 942		rc = strcmp(m->dso->short_name, map->dso->short_name);
 943		if (rc < 0)
 944			p = &(*p)->rb_left;
 945		else
 946			p = &(*p)->rb_right;
 947	}
 948	rb_link_node(&map->rb_node_name, parent, p);
 949	rb_insert_color(&map->rb_node_name, &maps->names);
 950	map__get(map);
 951}
 952
 953void maps__insert(struct maps *maps, struct map *map)
 954{
 955	down_write(&maps->lock);
 956	__maps__insert(maps, map);
 957	__maps__insert_name(maps, map);
 958	up_write(&maps->lock);
 959}
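/*
 * Note that in this version a struct maps keeps two rb-trees: "entries",
 * ordered by map->start, and "names", ordered by dso->short_name.  Each
 * insertion above therefore takes two references via map__get(), matching
 * the two map__put() calls in __maps__remove() below.
 */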
 960
 961static void __maps__remove(struct maps *maps, struct map *map)
 962{
 963	rb_erase_init(&map->rb_node, &maps->entries);
 964	map__put(map);
 965
 966	rb_erase_init(&map->rb_node_name, &maps->names);
 967	map__put(map);
 968}
 969
 970void maps__remove(struct maps *maps, struct map *map)
 971{
 972	down_write(&maps->lock);
 973	__maps__remove(maps, map);
 974	up_write(&maps->lock);
 975}
 976
 977struct map *maps__find(struct maps *maps, u64 ip)
 978{
 979	struct rb_node *p;
 980	struct map *m;
 981
 982	down_read(&maps->lock);
 983
 984	p = maps->entries.rb_node;
 985	while (p != NULL) {
 986		m = rb_entry(p, struct map, rb_node);
 987		if (ip < m->start)
 988			p = p->rb_left;
 989		else if (ip >= m->end)
 990			p = p->rb_right;
 991		else
 992			goto out;
 993	}
 994
 995	m = NULL;
 996out:
 997	up_read(&maps->lock);
 998	return m;
 999}
1000
1001struct map *maps__first(struct maps *maps)
1002{
1003	struct rb_node *first = rb_first(&maps->entries);
1004
1005	if (first)
1006		return rb_entry(first, struct map, rb_node);
1007	return NULL;
1008}
1009
1010struct map *map__next(struct map *map)
1011{
1012	struct rb_node *next = rb_next(&map->rb_node);
1013
1014	if (next)
1015		return rb_entry(next, struct map, rb_node);
1016	return NULL;
1017}
1018
1019struct kmap *__map__kmap(struct map *map)
1020{
1021	if (!map->dso || !map->dso->kernel)
1022		return NULL;
1023	return (struct kmap *)(map + 1);
1024}
1025
1026struct kmap *map__kmap(struct map *map)
1027{
1028	struct kmap *kmap = __map__kmap(map);
1029
1030	if (!kmap)
1031		pr_err("Internal error: map__kmap with a non-kernel map\n");
1032	return kmap;
1033}
1034
1035struct map_groups *map__kmaps(struct map *map)
1036{
1037	struct kmap *kmap = map__kmap(map);
1038
1039	if (!kmap || !kmap->kmaps) {
1040		pr_err("Internal error: map__kmaps with a non-kernel map\n");
1041		return NULL;
1042	}
1043	return kmap->kmaps;
1044}