v3.5.6
  1#include "symbol.h"
  2#include <errno.h>
  3#include <inttypes.h>
  4#include <limits.h>
  5#include <stdlib.h>
  6#include <string.h>
  7#include <stdio.h>
  8#include <unistd.h>
  9#include "map.h"
 10
 11const char *map_type__name[MAP__NR_TYPES] = {
 12	[MAP__FUNCTION] = "Functions",
 13	[MAP__VARIABLE] = "Variables",
 14};
 15
 16static inline int is_anon_memory(const char *filename)
 17{
 18	return strcmp(filename, "//anon") == 0;
 19}
 20
 21static inline int is_no_dso_memory(const char *filename)
 22{
 23	return !strcmp(filename, "[stack]") ||
 24	       !strcmp(filename, "[vdso]")  ||
 25	       !strcmp(filename, "[heap]");
 26}
 27
 28void map__init(struct map *self, enum map_type type,
 29	       u64 start, u64 end, u64 pgoff, struct dso *dso)
 30{
 31	self->type     = type;
 32	self->start    = start;
 33	self->end      = end;
 34	self->pgoff    = pgoff;
 35	self->dso      = dso;
 36	self->map_ip   = map__map_ip;
 37	self->unmap_ip = map__unmap_ip;
 38	RB_CLEAR_NODE(&self->rb_node);
 39	self->groups   = NULL;
 40	self->referenced = false;
 41	self->erange_warned = false;
 42}
 43
 44struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
 45		     u64 pgoff, u32 pid, char *filename,
 46		     enum map_type type)
 47{
 48	struct map *self = malloc(sizeof(*self));
 49
 50	if (self != NULL) {
 51		char newfilename[PATH_MAX];
 52		struct dso *dso;
 53		int anon, no_dso;
 54
 55		anon = is_anon_memory(filename);
 56		no_dso = is_no_dso_memory(filename);
 57
 58		if (anon) {
 59			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
 60			filename = newfilename;
 61		}
 62
 63		dso = __dsos__findnew(dsos__list, filename);
 64		if (dso == NULL)
 65			goto out_delete;
 66
 67		map__init(self, type, start, start + len, pgoff, dso);
 68
 69		if (anon || no_dso) {
 70			self->map_ip = self->unmap_ip = identity__map_ip;
 71
 72			/*
 73			 * Set memory without DSO as loaded. All map__find_*
 74			 * functions still return NULL, and we avoid the
 75			 * unnecessary map__load warning.
 76			 */
 77			if (no_dso)
 78				dso__set_loaded(dso, self->type);
 79		}
 80	}
 81	return self;
 82out_delete:
 83	free(self);
 84	return NULL;
 85}
 86
 87void map__delete(struct map *self)
 88{
 89	free(self);
 90}
 91
 92void map__fixup_start(struct map *self)
 93{
 94	struct rb_root *symbols = &self->dso->symbols[self->type];
 95	struct rb_node *nd = rb_first(symbols);
 96	if (nd != NULL) {
 97		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
 98		self->start = sym->start;
 99	}
100}
101
102void map__fixup_end(struct map *self)
103{
104	struct rb_root *symbols = &self->dso->symbols[self->type];
105	struct rb_node *nd = rb_last(symbols);
106	if (nd != NULL) {
107		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
108		self->end = sym->end;
109	}
110}
111
112#define DSO__DELETED "(deleted)"
113
114int map__load(struct map *self, symbol_filter_t filter)
115{
116	const char *name = self->dso->long_name;
117	int nr;
118
119	if (dso__loaded(self->dso, self->type))
120		return 0;
121
122	nr = dso__load(self->dso, self, filter);
123	if (nr < 0) {
124		if (self->dso->has_build_id) {
125			char sbuild_id[BUILD_ID_SIZE * 2 + 1];
126
127			build_id__sprintf(self->dso->build_id,
128					  sizeof(self->dso->build_id),
129					  sbuild_id);
130			pr_warning("%s with build id %s not found",
131				   name, sbuild_id);
132		} else
133			pr_warning("Failed to open %s", name);
134
135		pr_warning(", continuing without symbols\n");
136		return -1;
137	} else if (nr == 0) {
138		const size_t len = strlen(name);
139		const size_t real_len = len - sizeof(DSO__DELETED);
140
141		if (len > sizeof(DSO__DELETED) &&
142		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
143			pr_warning("%.*s was updated (is prelink enabled?). "
144				"Restart the long running apps that use it!\n",
145				   (int)real_len, name);
146		} else {
147			pr_warning("no symbols found in %s, maybe install "
148				   "a debug package?\n", name);
149		}
150
151		return -1;
152	}
153	/*
154	 * Only applies to the kernel, as its symtabs aren't relative like the
155	 * module ones.
156	 */
157	if (self->dso->kernel)
158		map__reloc_vmlinux(self);
159
160	return 0;
161}
162
163struct symbol *map__find_symbol(struct map *self, u64 addr,
164				symbol_filter_t filter)
165{
166	if (map__load(self, filter) < 0)
167		return NULL;
168
169	return dso__find_symbol(self->dso, self->type, addr);
170}
171
172struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
173					symbol_filter_t filter)
174{
175	if (map__load(self, filter) < 0)
176		return NULL;
177
178	if (!dso__sorted_by_name(self->dso, self->type))
179		dso__sort_by_name(self->dso, self->type);
180
181	return dso__find_symbol_by_name(self->dso, self->type, name);
182}
183
184struct map *map__clone(struct map *self)
185{
186	struct map *map = malloc(sizeof(*self));
187
188	if (!map)
189		return NULL;
190
191	memcpy(map, self, sizeof(*self));
192
193	return map;
194}
195
196int map__overlap(struct map *l, struct map *r)
197{
198	if (l->start > r->start) {
199		struct map *t = l;
200		l = r;
201		r = t;
202	}
203
204	if (l->end > r->start)
205		return 1;
206
207	return 0;
208}
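The predicate above is symmetric in its arguments: the swap orders the two maps by start, and they overlap iff the earlier one ends past the later one's start. A standalone sketch of the same test on plain ranges, for illustration only (not part of map.c):

#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/* Mirrors map__overlap(): order the two ranges by start, then they
 * overlap iff the earlier one ends after the later one begins. */
static int range__overlap(struct range l, struct range r)
{
	if (l.start > r.start) {
		struct range t = l;
		l = r;
		r = t;
	}
	return l.end > r.start;
}

int main(void)
{
	struct range a = { 0x1000, 0x2000 };
	struct range b = { 0x1800, 0x3000 };
	struct range c = { 0x2000, 0x3000 };

	assert(range__overlap(a, b));   /* share 0x1800-0x1fff */
	assert(!range__overlap(a, c));  /* merely touching at 0x2000 is not an overlap here */
	assert(range__overlap(b, a));   /* argument order does not matter */
	return 0;
}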
209
210size_t map__fprintf(struct map *self, FILE *fp)
211{
212	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
213		       self->start, self->end, self->pgoff, self->dso->name);
214}
215
216size_t map__fprintf_dsoname(struct map *map, FILE *fp)
217{
218	const char *dsoname;
219
220	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
221		if (symbol_conf.show_kernel_path && map->dso->long_name)
222			dsoname = map->dso->long_name;
223		else if (map->dso->name)
224			dsoname = map->dso->name;
225	} else
226		dsoname = "[unknown]";
227
228	return fprintf(fp, "%s", dsoname);
229}
230
231/*
232 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
233 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
234 */
235u64 map__rip_2objdump(struct map *map, u64 rip)
236{
237	u64 addr = map->dso->adjust_symbols ?
238			map->unmap_ip(map, rip) :	/* RIP -> IP */
239			rip;
240	return addr;
241}
242
243u64 map__objdump_2ip(struct map *map, u64 addr)
244{
245	u64 ip = map->dso->adjust_symbols ?
246			addr :
247			map->unmap_ip(map, addr);	/* RIP -> IP */
248	return ip;
249}
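For reference, a standalone sketch of the two conversions above. It assumes the usual linear map__map_ip()/map__unmap_ip() arithmetic from util/map.h (not shown in this file); illustration only, not part of map.c:

/*
 * Illustration only -- not part of map.c.  Assumed util/map.h arithmetic:
 *   map_ip(x)   = x - start + pgoff    (memory address -> dso address)
 *   unmap_ip(x) = x + start - pgoff    (dso address -> memory address)
 */
#include <assert.h>
#include <stdint.h>

struct toy_map { uint64_t start, pgoff; int adjust_symbols; };

static uint64_t toy_map_ip(const struct toy_map *m, uint64_t ip)
{
	return ip - m->start + m->pgoff;
}

static uint64_t toy_unmap_ip(const struct toy_map *m, uint64_t ip)
{
	return ip + m->start - m->pgoff;
}

/* Same decision as map__rip_2objdump()/map__objdump_2ip() above. */
static uint64_t toy_rip_2objdump(const struct toy_map *m, uint64_t rip)
{
	return m->adjust_symbols ? toy_unmap_ip(m, rip) : rip;
}

static uint64_t toy_objdump_2ip(const struct toy_map *m, uint64_t addr)
{
	return m->adjust_symbols ? addr : toy_unmap_ip(m, addr);
}

int main(void)
{
	/* ET_DYN-like shared object mapped at 0x7f0000000000 */
	struct toy_map dyn = { .start = 0x7f0000000000ULL, .pgoff = 0, .adjust_symbols = 0 };
	uint64_t sample = 0x7f0000001234ULL;           /* address seen in a perf sample */
	uint64_t rip = toy_map_ip(&dyn, sample);       /* 0x1234, file-relative */

	assert(toy_rip_2objdump(&dyn, rip) == 0x1234); /* objdump wants the RIP as-is */
	assert(toy_objdump_2ip(&dyn, 0x1234) == sample);

	/* ET_EXEC-like binary: objdump wants the absolute address back */
	struct toy_map exec = { .start = 0x400000, .pgoff = 0, .adjust_symbols = 1 };
	assert(toy_rip_2objdump(&exec, toy_map_ip(&exec, 0x400abc)) == 0x400abc);
	return 0;
}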
250
251void map_groups__init(struct map_groups *mg)
252{
253	int i;
254	for (i = 0; i < MAP__NR_TYPES; ++i) {
255		mg->maps[i] = RB_ROOT;
256		INIT_LIST_HEAD(&mg->removed_maps[i]);
257	}
258	mg->machine = NULL;
259}
260
261static void maps__delete(struct rb_root *maps)
262{
263	struct rb_node *next = rb_first(maps);
264
265	while (next) {
266		struct map *pos = rb_entry(next, struct map, rb_node);
267
268		next = rb_next(&pos->rb_node);
269		rb_erase(&pos->rb_node, maps);
270		map__delete(pos);
271	}
272}
273
274static void maps__delete_removed(struct list_head *maps)
275{
276	struct map *pos, *n;
277
278	list_for_each_entry_safe(pos, n, maps, node) {
279		list_del(&pos->node);
280		map__delete(pos);
281	}
282}
283
284void map_groups__exit(struct map_groups *mg)
285{
286	int i;
287
288	for (i = 0; i < MAP__NR_TYPES; ++i) {
289		maps__delete(&mg->maps[i]);
290		maps__delete_removed(&mg->removed_maps[i]);
291	}
292}
293
294void map_groups__flush(struct map_groups *mg)
295{
296	int type;
297
298	for (type = 0; type < MAP__NR_TYPES; type++) {
299		struct rb_root *root = &mg->maps[type];
300		struct rb_node *next = rb_first(root);
301
302		while (next) {
303			struct map *pos = rb_entry(next, struct map, rb_node);
304			next = rb_next(&pos->rb_node);
305			rb_erase(&pos->rb_node, root);
306			/*
307			 * We may have references to this map, for
308			 * instance in some hist_entry instances, so
309			 * just move them to a separate list.
310			 */
311			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
312		}
313	}
314}
315
316struct symbol *map_groups__find_symbol(struct map_groups *mg,
317				       enum map_type type, u64 addr,
318				       struct map **mapp,
319				       symbol_filter_t filter)
320{
321	struct map *map = map_groups__find(mg, type, addr);
322
323	if (map != NULL) {
324		if (mapp != NULL)
325			*mapp = map;
326		return map__find_symbol(map, map->map_ip(map, addr), filter);
327	}
328
329	return NULL;
330}
331
332struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
333					       enum map_type type,
334					       const char *name,
335					       struct map **mapp,
336					       symbol_filter_t filter)
337{
338	struct rb_node *nd;
339
340	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
341		struct map *pos = rb_entry(nd, struct map, rb_node);
342		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
343
344		if (sym == NULL)
345			continue;
346		if (mapp != NULL)
347			*mapp = pos;
348		return sym;
349	}
350
351	return NULL;
352}
353
354size_t __map_groups__fprintf_maps(struct map_groups *mg,
355				  enum map_type type, int verbose, FILE *fp)
356{
357	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
358	struct rb_node *nd;
359
360	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
361		struct map *pos = rb_entry(nd, struct map, rb_node);
362		printed += fprintf(fp, "Map:");
363		printed += map__fprintf(pos, fp);
364		if (verbose > 2) {
365			printed += dso__fprintf(pos->dso, type, fp);
366			printed += fprintf(fp, "--\n");
367		}
368	}
369
370	return printed;
371}
372
373size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
374{
375	size_t printed = 0, i;
376	for (i = 0; i < MAP__NR_TYPES; ++i)
377		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
378	return printed;
379}
380
381static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
382						 enum map_type type,
383						 int verbose, FILE *fp)
384{
385	struct map *pos;
386	size_t printed = 0;
387
388	list_for_each_entry(pos, &mg->removed_maps[type], node) {
389		printed += fprintf(fp, "Map:");
390		printed += map__fprintf(pos, fp);
391		if (verbose > 1) {
392			printed += dso__fprintf(pos->dso, type, fp);
393			printed += fprintf(fp, "--\n");
394		}
395	}
396	return printed;
397}
398
399static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
400					       int verbose, FILE *fp)
401{
402	size_t printed = 0, i;
403	for (i = 0; i < MAP__NR_TYPES; ++i)
404		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
405	return printed;
406}
407
408size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
409{
410	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
411	printed += fprintf(fp, "Removed maps:\n");
412	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
413}
414
415int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
416				   int verbose, FILE *fp)
417{
418	struct rb_root *root = &mg->maps[map->type];
419	struct rb_node *next = rb_first(root);
420	int err = 0;
421
422	while (next) {
423		struct map *pos = rb_entry(next, struct map, rb_node);
424		next = rb_next(&pos->rb_node);
425
426		if (!map__overlap(pos, map))
427			continue;
428
429		if (verbose >= 2) {
430			fputs("overlapping maps:\n", fp);
431			map__fprintf(map, fp);
432			map__fprintf(pos, fp);
433		}
434
435		rb_erase(&pos->rb_node, root);
436		/*
437		 * Now check if we need to create new maps for areas not
438		 * overlapped by the new map:
439		 */
440		if (map->start > pos->start) {
441			struct map *before = map__clone(pos);
442
443			if (before == NULL) {
444				err = -ENOMEM;
445				goto move_map;
446			}
447
448			before->end = map->start - 1;
449			map_groups__insert(mg, before);
450			if (verbose >= 2)
451				map__fprintf(before, fp);
452		}
453
454		if (map->end < pos->end) {
455			struct map *after = map__clone(pos);
456
457			if (after == NULL) {
458				err = -ENOMEM;
459				goto move_map;
460			}
461
462			after->start = map->end + 1;
463			map_groups__insert(mg, after);
464			if (verbose >= 2)
465				map__fprintf(after, fp);
466		}
467move_map:
468		/*
469		 * If we have references, just move them to a separate list.
470		 */
471		if (pos->referenced)
472			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
473		else
474			map__delete(pos);
475
476		if (err)
477			return err;
478	}
479
480	return 0;
481}
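A worked, compilable example of the boundary arithmetic above, for illustration only (not part of map.c). Note that this version splits with inclusive -1/+1 boundaries and leaves ->pgoff of the clones untouched; the v5.9 maps__fixup_overlappings() further down does both differently.

/* Illustration only -- not part of map.c. */
#include <assert.h>
#include <stdint.h>

struct rng { uint64_t start, end; };

/* Same split as map_groups__fixup_overlappings() performs on an old map
 * "old" that overlaps the newly inserted map "new". */
static void split(struct rng old, struct rng new,
		  struct rng *before, struct rng *after,
		  int *has_before, int *has_after)
{
	*has_before = new.start > old.start;
	*has_after  = new.end   < old.end;
	if (*has_before)
		*before = (struct rng){ old.start, new.start - 1 };
	if (*has_after)
		*after  = (struct rng){ new.end + 1, old.end };
}

int main(void)
{
	/* old mapping 0x1000-0x5000 partially replaced by 0x2000-0x3000 */
	struct rng old = { 0x1000, 0x5000 }, new = { 0x2000, 0x3000 };
	struct rng before, after;
	int has_before, has_after;

	split(old, new, &before, &after, &has_before, &has_after);
	assert(has_before && before.start == 0x1000 && before.end == 0x1fff);
	assert(has_after  && after.start  == 0x3001 && after.end  == 0x5000);
	return 0;
}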
482
483/*
 484 * XXX This should not really _copy_ the maps, but refcount them.
485 */
486int map_groups__clone(struct map_groups *mg,
487		      struct map_groups *parent, enum map_type type)
488{
489	struct rb_node *nd;
490	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
491		struct map *map = rb_entry(nd, struct map, rb_node);
492		struct map *new = map__clone(map);
493		if (new == NULL)
494			return -ENOMEM;
495		map_groups__insert(mg, new);
496	}
497	return 0;
498}
499
500static u64 map__reloc_map_ip(struct map *map, u64 ip)
501{
502	return ip + (s64)map->pgoff;
503}
504
505static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
506{
507	return ip - (s64)map->pgoff;
508}
509
510void map__reloc_vmlinux(struct map *self)
511{
512	struct kmap *kmap = map__kmap(self);
513	s64 reloc;
514
515	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
516		return;
517
518	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
519		 kmap->ref_reloc_sym->addr);
520
521	if (!reloc)
522		return;
523
524	self->map_ip   = map__reloc_map_ip;
525	self->unmap_ip = map__reloc_unmap_ip;
526	self->pgoff    = reloc;
527}
528
529void maps__insert(struct rb_root *maps, struct map *map)
530{
531	struct rb_node **p = &maps->rb_node;
532	struct rb_node *parent = NULL;
533	const u64 ip = map->start;
534	struct map *m;
535
536	while (*p != NULL) {
537		parent = *p;
538		m = rb_entry(parent, struct map, rb_node);
539		if (ip < m->start)
540			p = &(*p)->rb_left;
541		else
542			p = &(*p)->rb_right;
543	}
544
545	rb_link_node(&map->rb_node, parent, p);
546	rb_insert_color(&map->rb_node, maps);
547}
548
549void maps__remove(struct rb_root *self, struct map *map)
550{
551	rb_erase(&map->rb_node, self);
552}
553
554struct map *maps__find(struct rb_root *maps, u64 ip)
555{
556	struct rb_node **p = &maps->rb_node;
557	struct rb_node *parent = NULL;
558	struct map *m;
559
560	while (*p != NULL) {
561		parent = *p;
562		m = rb_entry(parent, struct map, rb_node);
563		if (ip < m->start)
564			p = &(*p)->rb_left;
565		else if (ip > m->end)
566			p = &(*p)->rb_right;
567		else
568			return m;
569	}
570
571	return NULL;
572}
573
574int machine__init(struct machine *self, const char *root_dir, pid_t pid)
575{
576	map_groups__init(&self->kmaps);
577	RB_CLEAR_NODE(&self->rb_node);
578	INIT_LIST_HEAD(&self->user_dsos);
579	INIT_LIST_HEAD(&self->kernel_dsos);
580
581	self->threads = RB_ROOT;
582	INIT_LIST_HEAD(&self->dead_threads);
583	self->last_match = NULL;
584
585	self->kmaps.machine = self;
586	self->pid	    = pid;
587	self->root_dir      = strdup(root_dir);
588	return self->root_dir == NULL ? -ENOMEM : 0;
589}
590
591static void dsos__delete(struct list_head *self)
592{
593	struct dso *pos, *n;
594
595	list_for_each_entry_safe(pos, n, self, node) {
596		list_del(&pos->node);
597		dso__delete(pos);
598	}
599}
600
601void machine__exit(struct machine *self)
602{
603	map_groups__exit(&self->kmaps);
604	dsos__delete(&self->user_dsos);
605	dsos__delete(&self->kernel_dsos);
606	free(self->root_dir);
607	self->root_dir = NULL;
608}
609
610void machine__delete(struct machine *self)
611{
612	machine__exit(self);
613	free(self);
614}
615
616struct machine *machines__add(struct rb_root *self, pid_t pid,
617			      const char *root_dir)
618{
619	struct rb_node **p = &self->rb_node;
620	struct rb_node *parent = NULL;
621	struct machine *pos, *machine = malloc(sizeof(*machine));
622
623	if (!machine)
624		return NULL;
625
626	if (machine__init(machine, root_dir, pid) != 0) {
627		free(machine);
628		return NULL;
629	}
630
631	while (*p != NULL) {
632		parent = *p;
633		pos = rb_entry(parent, struct machine, rb_node);
634		if (pid < pos->pid)
635			p = &(*p)->rb_left;
636		else
637			p = &(*p)->rb_right;
638	}
639
640	rb_link_node(&machine->rb_node, parent, p);
641	rb_insert_color(&machine->rb_node, self);
642
643	return machine;
644}
645
646struct machine *machines__find(struct rb_root *self, pid_t pid)
647{
648	struct rb_node **p = &self->rb_node;
649	struct rb_node *parent = NULL;
650	struct machine *machine;
651	struct machine *default_machine = NULL;
652
653	while (*p != NULL) {
654		parent = *p;
655		machine = rb_entry(parent, struct machine, rb_node);
656		if (pid < machine->pid)
657			p = &(*p)->rb_left;
658		else if (pid > machine->pid)
659			p = &(*p)->rb_right;
660		else
661			return machine;
662		if (!machine->pid)
663			default_machine = machine;
664	}
665
666	return default_machine;
667}
668
669struct machine *machines__findnew(struct rb_root *self, pid_t pid)
670{
671	char path[PATH_MAX];
672	const char *root_dir = "";
673	struct machine *machine = machines__find(self, pid);
674
675	if (machine && (machine->pid == pid))
676		goto out;
677
678	if ((pid != HOST_KERNEL_ID) &&
679	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
680	    (symbol_conf.guestmount)) {
681		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
682		if (access(path, R_OK)) {
683			pr_err("Can't access file %s\n", path);
684			machine = NULL;
685			goto out;
686		}
687		root_dir = path;
688	}
689
690	machine = machines__add(self, pid, root_dir);
691
692out:
693	return machine;
694}
695
696void machines__process(struct rb_root *self, machine__process_t process, void *data)
697{
698	struct rb_node *nd;
699
700	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
701		struct machine *pos = rb_entry(nd, struct machine, rb_node);
702		process(pos, data);
703	}
704}
705
706char *machine__mmap_name(struct machine *self, char *bf, size_t size)
707{
708	if (machine__is_host(self))
709		snprintf(bf, size, "[%s]", "kernel.kallsyms");
710	else if (machine__is_default_guest(self))
711		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
712	else
713		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
714
715	return bf;
716}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2#include "symbol.h"
  3#include <assert.h>
  4#include <errno.h>
  5#include <inttypes.h>
  6#include <limits.h>
  7#include <stdlib.h>
  8#include <string.h>
  9#include <stdio.h>
 10#include <unistd.h>
 11#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
 12#include "dso.h"
 13#include "map.h"
 14#include "map_symbol.h"
 15#include "thread.h"
 16#include "vdso.h"
 17#include "build-id.h"
 18#include "debug.h"
 19#include "machine.h"
 20#include <linux/string.h>
 21#include <linux/zalloc.h>
 22#include "srcline.h"
 23#include "namespaces.h"
 24#include "unwind.h"
 25#include "srccode.h"
 26#include "ui/ui.h"
 27
 28static void __maps__insert(struct maps *maps, struct map *map);
 29
 30static inline int is_anon_memory(const char *filename, u32 flags)
 31{
 32	return flags & MAP_HUGETLB ||
 33	       !strcmp(filename, "//anon") ||
 34	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
 35	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
 36}
 37
 38static inline int is_no_dso_memory(const char *filename)
 39{
 40	return !strncmp(filename, "[stack", 6) ||
 41	       !strncmp(filename, "/SYSV",5)   ||
 42	       !strcmp(filename, "[heap]");
 43}
 44
 45static inline int is_android_lib(const char *filename)
 46{
 47	return strstarts(filename, "/data/app-lib/") ||
 48	       strstarts(filename, "/system/lib/");
 49}
 50
 51static inline bool replace_android_lib(const char *filename, char *newfilename)
 52{
 53	const char *libname;
 54	char *app_abi;
 55	size_t app_abi_length, new_length;
 56	size_t lib_length = 0;
 57
 58	libname  = strrchr(filename, '/');
 59	if (libname)
 60		lib_length = strlen(libname);
 61
 62	app_abi = getenv("APP_ABI");
 63	if (!app_abi)
 64		return false;
 65
 66	app_abi_length = strlen(app_abi);
 67
 68	if (strstarts(filename, "/data/app-lib/")) {
 69		char *apk_path;
 70
 71		if (!app_abi_length)
 72			return false;
 73
 74		new_length = 7 + app_abi_length + lib_length;
 75
 76		apk_path = getenv("APK_PATH");
 77		if (apk_path) {
 78			new_length += strlen(apk_path) + 1;
 79			if (new_length > PATH_MAX)
 80				return false;
 81			snprintf(newfilename, new_length,
 82				 "%s/libs/%s/%s", apk_path, app_abi, libname);
 83		} else {
 84			if (new_length > PATH_MAX)
 85				return false;
 86			snprintf(newfilename, new_length,
 87				 "libs/%s/%s", app_abi, libname);
 88		}
 89		return true;
 90	}
 91
 92	if (strstarts(filename, "/system/lib/")) {
 93		char *ndk, *app;
 94		const char *arch;
 95		size_t ndk_length;
 96		size_t app_length;
 97
 98		ndk = getenv("NDK_ROOT");
 99		app = getenv("APP_PLATFORM");
100
101		if (!(ndk && app))
102			return false;
103
104		ndk_length = strlen(ndk);
105		app_length = strlen(app);
106
107		if (!(ndk_length && app_length && app_abi_length))
108			return false;
109
110		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
111		       !strncmp(app_abi, "mips", 4) ? "mips" :
112		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
113
114		if (!arch)
115			return false;
116
117		new_length = 27 + ndk_length +
118			     app_length + lib_length
119			   + strlen(arch);
120
121		if (new_length > PATH_MAX)
122			return false;
123		snprintf(newfilename, new_length,
124			"%s/platforms/%s/arch-%s/usr/lib/%s",
125			ndk, app, arch, libname);
126
127		return true;
128	}
129	return false;
130}
131
132void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
133{
134	map->start    = start;
135	map->end      = end;
136	map->pgoff    = pgoff;
137	map->reloc    = 0;
138	map->dso      = dso__get(dso);
139	map->map_ip   = map__map_ip;
140	map->unmap_ip = map__unmap_ip;
141	RB_CLEAR_NODE(&map->rb_node);
142	map->erange_warned = false;
143	refcount_set(&map->refcnt, 1);
144}
145
146struct map *map__new(struct machine *machine, u64 start, u64 len,
147		     u64 pgoff, struct dso_id *id,
148		     u32 prot, u32 flags, char *filename,
149		     struct thread *thread)
150{
151	struct map *map = malloc(sizeof(*map));
152	struct nsinfo *nsi = NULL;
153	struct nsinfo *nnsi;
154
155	if (map != NULL) {
156		char newfilename[PATH_MAX];
157		struct dso *dso;
158		int anon, no_dso, vdso, android;
159
160		android = is_android_lib(filename);
161		anon = is_anon_memory(filename, flags);
162		vdso = is_vdso_map(filename);
163		no_dso = is_no_dso_memory(filename);
164		map->prot = prot;
165		map->flags = flags;
166		nsi = nsinfo__get(thread->nsinfo);
167
168		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
169			snprintf(newfilename, sizeof(newfilename),
170				 "/tmp/perf-%d.map", nsi->pid);
171			filename = newfilename;
172		}
173
174		if (android) {
175			if (replace_android_lib(filename, newfilename))
176				filename = newfilename;
177		}
178
179		if (vdso) {
180			/* The vdso maps are always on the host and not the
181			 * container.  Ensure that we don't use setns to look
182			 * them up.
183			 */
184			nnsi = nsinfo__copy(nsi);
185			if (nnsi) {
186				nsinfo__put(nsi);
187				nnsi->need_setns = false;
188				nsi = nnsi;
189			}
190			pgoff = 0;
191			dso = machine__findnew_vdso(machine, thread);
192		} else
193			dso = machine__findnew_dso_id(machine, filename, id);
194
195		if (dso == NULL)
196			goto out_delete;
197
198		map__init(map, start, start + len, pgoff, dso);
199
200		if (anon || no_dso) {
201			map->map_ip = map->unmap_ip = identity__map_ip;
202
203			/*
204			 * Set memory without DSO as loaded. All map__find_*
205			 * functions still return NULL, and we avoid the
206			 * unnecessary map__load warning.
207			 */
208			if (!(prot & PROT_EXEC))
209				dso__set_loaded(dso);
210		}
211		dso->nsinfo = nsi;
212		dso__put(dso);
213	}
214	return map;
215out_delete:
216	nsinfo__put(nsi);
217	free(map);
218	return NULL;
219}
220
221/*
222 * Constructor variant for modules (where we know from /proc/modules where
223 * they are loaded) and for vmlinux, where only after we load all the
224 * symbols we'll know where it starts and ends.
225 */
226struct map *map__new2(u64 start, struct dso *dso)
227{
228	struct map *map = calloc(1, (sizeof(*map) +
229				     (dso->kernel ? sizeof(struct kmap) : 0)));
230	if (map != NULL) {
231		/*
232		 * ->end will be filled after we load all the symbols
233		 */
234		map__init(map, start, 0, 0, dso);
235	}
236
237	return map;
238}
239
240bool __map__is_kernel(const struct map *map)
241{
242	if (!map->dso->kernel)
243		return false;
244	return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
245}
246
247bool __map__is_extra_kernel_map(const struct map *map)
248{
249	struct kmap *kmap = __map__kmap((struct map *)map);
250
251	return kmap && kmap->name[0];
252}
253
254bool __map__is_bpf_prog(const struct map *map)
255{
256	const char *name;
257
258	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
259		return true;
260
261	/*
262	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
263	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
264	 * guess the type based on name.
265	 */
266	name = map->dso->short_name;
267	return name && (strstr(name, "bpf_prog_") == name);
268}
269
270bool __map__is_bpf_image(const struct map *map)
271{
272	const char *name;
273
274	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
275		return true;
276
277	/*
278	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
279	 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
280	 * guess the type based on name.
281	 */
282	name = map->dso->short_name;
283	return name && is_bpf_image(name);
284}
285
286bool __map__is_ool(const struct map *map)
287{
288	return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
289}
290
291bool map__has_symbols(const struct map *map)
292{
293	return dso__has_symbols(map->dso);
294}
295
296static void map__exit(struct map *map)
297{
298	BUG_ON(refcount_read(&map->refcnt) != 0);
299	dso__zput(map->dso);
300}
301
302void map__delete(struct map *map)
303{
304	map__exit(map);
305	free(map);
306}
307
308void map__put(struct map *map)
309{
310	if (map && refcount_dec_and_test(&map->refcnt))
311		map__delete(map);
312}
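A standalone sketch of the reference-counting pattern that map__init() sets up and map__put() finishes, modelled with a C11 atomic so it compiles on its own. Illustration only; the real map__get() is assumed to be a static inline in util/map.h, which is not shown in this file.

/* Illustration only -- not part of map.c. */
#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_uint refcnt; };

static struct obj *obj__new(void)
{
	struct obj *o = malloc(sizeof(*o));
	if (o)
		atomic_init(&o->refcnt, 1);      /* like refcount_set(&map->refcnt, 1) in map__init() */
	return o;
}

static struct obj *obj__get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcnt, 1); /* like map__get(): take a reference before storing */
	return o;
}

static void obj__put(struct obj *o)
{
	/* like map__put(): free only when the last reference is dropped */
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj__new();      /* refcnt == 1 */
	struct obj *alias = obj__get(o); /* refcnt == 2, e.g. stored in an rbtree */

	obj__put(o);                     /* refcnt == 1, object still alive */
	assert(atomic_load(&alias->refcnt) == 1);
	obj__put(alias);                 /* last reference: freed */
	return 0;
}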
313
314void map__fixup_start(struct map *map)
315{
316	struct rb_root_cached *symbols = &map->dso->symbols;
317	struct rb_node *nd = rb_first_cached(symbols);
318	if (nd != NULL) {
319		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
320		map->start = sym->start;
321	}
322}
323
324void map__fixup_end(struct map *map)
325{
326	struct rb_root_cached *symbols = &map->dso->symbols;
327	struct rb_node *nd = rb_last(&symbols->rb_root);
328	if (nd != NULL) {
329		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
330		map->end = sym->end;
331	}
332}
333
334#define DSO__DELETED "(deleted)"
335
336int map__load(struct map *map)
337{
338	const char *name = map->dso->long_name;
339	int nr;
340
341	if (dso__loaded(map->dso))
342		return 0;
343
344	nr = dso__load(map->dso, map);
345	if (nr < 0) {
346		if (map->dso->has_build_id) {
347			char sbuild_id[SBUILD_ID_SIZE];
348
349			build_id__sprintf(map->dso->build_id,
350					  sizeof(map->dso->build_id),
351					  sbuild_id);
352			pr_debug("%s with build id %s not found", name, sbuild_id);
353		} else
354			pr_debug("Failed to open %s", name);
355
356		pr_debug(", continuing without symbols\n");
357		return -1;
358	} else if (nr == 0) {
359#ifdef HAVE_LIBELF_SUPPORT
360		const size_t len = strlen(name);
361		const size_t real_len = len - sizeof(DSO__DELETED);
362
363		if (len > sizeof(DSO__DELETED) &&
364		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
365			pr_debug("%.*s was updated (is prelink enabled?). "
366				"Restart the long running apps that use it!\n",
367				   (int)real_len, name);
368		} else {
369			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
370		}
371#endif
372		return -1;
373	}
374
375	return 0;
376}
377
378struct symbol *map__find_symbol(struct map *map, u64 addr)
379{
380	if (map__load(map) < 0)
381		return NULL;
382
383	return dso__find_symbol(map->dso, addr);
384}
385
386struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
387{
388	if (map__load(map) < 0)
389		return NULL;
390
391	if (!dso__sorted_by_name(map->dso))
392		dso__sort_by_name(map->dso);
393
394	return dso__find_symbol_by_name(map->dso, name);
395}
396
397struct map *map__clone(struct map *from)
398{
399	size_t size = sizeof(struct map);
400	struct map *map;
401
402	if (from->dso && from->dso->kernel)
403		size += sizeof(struct kmap);
404
405	map = memdup(from, size);
406	if (map != NULL) {
407		refcount_set(&map->refcnt, 1);
408		RB_CLEAR_NODE(&map->rb_node);
409		dso__get(map->dso);
410	}
411
412	return map;
413}
414
415size_t map__fprintf(struct map *map, FILE *fp)
416{
417	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
418		       map->start, map->end, map->pgoff, map->dso->name);
419}
420
421size_t map__fprintf_dsoname(struct map *map, FILE *fp)
422{
423	char buf[symbol_conf.pad_output_len_dso + 1];
424	const char *dsoname = "[unknown]";
425
426	if (map && map->dso) {
427		if (symbol_conf.show_kernel_path && map->dso->long_name)
428			dsoname = map->dso->long_name;
429		else
430			dsoname = map->dso->name;
431	}
432
433	if (symbol_conf.pad_output_len_dso) {
434		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
435		dsoname = buf;
436	}
437
438	return fprintf(fp, "%s", dsoname);
439}
440
441char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
442{
443	if (map == NULL)
444		return SRCLINE_UNKNOWN;
445	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
446}
447
448int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
449			 FILE *fp)
450{
451	int ret = 0;
452
453	if (map && map->dso) {
454		char *srcline = map__srcline(map, addr, NULL);
455		if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
456			ret = fprintf(fp, "%s%s", prefix, srcline);
457		free_srcline(srcline);
458	}
459	return ret;
460}
461
462void srccode_state_free(struct srccode_state *state)
463{
464	zfree(&state->srcfile);
465	state->line = 0;
466}
467
468/**
469 * map__rip_2objdump - convert symbol start address to objdump address.
470 * @map: memory map
471 * @rip: symbol start address
472 *
473 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
474 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
475 * relative to section start.
476 *
477 * Return: Address suitable for passing to "objdump --start-address="
478 */
479u64 map__rip_2objdump(struct map *map, u64 rip)
480{
481	struct kmap *kmap = __map__kmap(map);
482
483	/*
484	 * vmlinux does not have program headers for PTI entry trampolines and
485	 * kcore may not either. However the trampoline object code is on the
486	 * main kernel map, so just use that instead.
487	 */
488	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
489		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
490
491		if (kernel_map)
492			map = kernel_map;
493	}
494
495	if (!map->dso->adjust_symbols)
496		return rip;
497
498	if (map->dso->rel)
499		return rip - map->pgoff;
500
501	/*
502	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
503	 * but all kernel modules are ET_REL, so won't get here.
504	 */
505	if (map->dso->kernel == DSO_SPACE__USER)
506		return rip + map->dso->text_offset;
507
508	return map->unmap_ip(map, rip) - map->reloc;
509}
510
511/**
512 * map__objdump_2mem - convert objdump address to a memory address.
513 * @map: memory map
514 * @ip: objdump address
515 *
516 * Closely related to map__rip_2objdump(), this function takes an address from
517 * objdump and converts it to a memory address.  Note this assumes that @map
518 * contains the address.  To be sure the result is valid, check it forwards
519 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
520 *
521 * Return: Memory address.
522 */
523u64 map__objdump_2mem(struct map *map, u64 ip)
524{
525	if (!map->dso->adjust_symbols)
526		return map->unmap_ip(map, ip);
527
528	if (map->dso->rel)
529		return map->unmap_ip(map, ip + map->pgoff);
530
531	/*
532	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
533	 * but all kernel modules are ET_REL, so won't get here.
534	 */
535	if (map->dso->kernel == DSO_SPACE__USER)
536		return map->unmap_ip(map, ip - map->dso->text_offset);
537
538	return ip + map->reloc;
539}
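A compilable check of the round trip suggested in the comment above, restricted to the vmlinux-style branch (adjust_symbols set, dso->rel clear, kernel dso) and again assuming the usual linear map_ip()/unmap_ip() arithmetic from util/map.h. Illustration only, not part of map.c:

/* Illustration only -- not part of map.c.  Assumed util/map.h arithmetic:
 * map_ip(x) = x - start + pgoff, unmap_ip(x) = x + start - pgoff. */
#include <assert.h>
#include <stdint.h>

struct toy_map { uint64_t start, pgoff, reloc; };

static uint64_t toy_map_ip(const struct toy_map *m, uint64_t ip)
{
	return ip - m->start + m->pgoff;
}

static uint64_t toy_unmap_ip(const struct toy_map *m, uint64_t ip)
{
	return ip + m->start - m->pgoff;
}

static uint64_t toy_rip_2objdump(const struct toy_map *m, uint64_t rip)
{
	return toy_unmap_ip(m, rip) - m->reloc;   /* last branch of map__rip_2objdump() */
}

static uint64_t toy_objdump_2mem(const struct toy_map *m, uint64_t ip)
{
	return ip + m->reloc;                     /* last branch of map__objdump_2mem() */
}

int main(void)
{
	/* kernel text relocated by 0x200000 at boot */
	struct toy_map m = { .start = 0xffffffff81200000ULL,
			     .pgoff = 0,
			     .reloc = 0x200000 };
	uint64_t objdump_addr = 0xffffffff81012345ULL;  /* address as vmlinux/objdump sees it */

	/* map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip */
	uint64_t mem = toy_objdump_2mem(&m, objdump_addr);
	assert(toy_rip_2objdump(&m, toy_map_ip(&m, mem)) == objdump_addr);
	return 0;
}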
540
541void maps__init(struct maps *maps, struct machine *machine)
542{
543	maps->entries = RB_ROOT;
544	init_rwsem(&maps->lock);
545	maps->machine = machine;
546	maps->last_search_by_name = NULL;
547	maps->nr_maps = 0;
548	maps->maps_by_name = NULL;
549	refcount_set(&maps->refcnt, 1);
550}
551
552static void __maps__free_maps_by_name(struct maps *maps)
553{
554	/*
555	 * Free everything to try to do it from the rbtree in the next search
556	 */
557	zfree(&maps->maps_by_name);
558	maps->nr_maps_allocated = 0;
559}
560
561void maps__insert(struct maps *maps, struct map *map)
562{
563	down_write(&maps->lock);
564	__maps__insert(maps, map);
565	++maps->nr_maps;
566
567	if (map->dso && map->dso->kernel) {
568		struct kmap *kmap = map__kmap(map);
569
570		if (kmap)
571			kmap->kmaps = maps;
572		else
573			pr_err("Internal error: kernel dso with non kernel map\n");
574	}
575
576
577	/*
578	 * If we already performed some search by name, then we need to add the just
579	 * inserted map and resort.
580	 */
581	if (maps->maps_by_name) {
582		if (maps->nr_maps > maps->nr_maps_allocated) {
583			int nr_allocate = maps->nr_maps * 2;
584			struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
585
586			if (maps_by_name == NULL) {
587				__maps__free_maps_by_name(maps);
588				up_write(&maps->lock);
589				return;
590			}
591
592			maps->maps_by_name = maps_by_name;
593			maps->nr_maps_allocated = nr_allocate;
594		}
595		maps->maps_by_name[maps->nr_maps - 1] = map;
596		__maps__sort_by_name(maps);
597	}
598	up_write(&maps->lock);
599}
600
601static void __maps__remove(struct maps *maps, struct map *map)
602{
603	rb_erase_init(&map->rb_node, &maps->entries);
604	map__put(map);
605}
606
607void maps__remove(struct maps *maps, struct map *map)
608{
609	down_write(&maps->lock);
610	if (maps->last_search_by_name == map)
611		maps->last_search_by_name = NULL;
612
613	__maps__remove(maps, map);
614	--maps->nr_maps;
615	if (maps->maps_by_name)
616		__maps__free_maps_by_name(maps);
617	up_write(&maps->lock);
618}
619
620static void __maps__purge(struct maps *maps)
621{
622	struct map *pos, *next;
623
624	maps__for_each_entry_safe(maps, pos, next) {
625		rb_erase_init(&pos->rb_node,  &maps->entries);
626		map__put(pos);
627	}
628}
629
630void maps__exit(struct maps *maps)
631{
632	down_write(&maps->lock);
633	__maps__purge(maps);
634	up_write(&maps->lock);
635}
636
637bool maps__empty(struct maps *maps)
638{
639	return !maps__first(maps);
640}
641
642struct maps *maps__new(struct machine *machine)
643{
644	struct maps *maps = zalloc(sizeof(*maps));
645
646	if (maps != NULL)
647		maps__init(maps, machine);
648
649	return maps;
650}
651
652void maps__delete(struct maps *maps)
653{
654	maps__exit(maps);
655	unwind__finish_access(maps);
656	free(maps);
657}
658
659void maps__put(struct maps *maps)
660{
661	if (maps && refcount_dec_and_test(&maps->refcnt))
662		maps__delete(maps);
663}
664
665struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
666{
667	struct map *map = maps__find(maps, addr);
668
669	/* Ensure map is loaded before using map->map_ip */
670	if (map != NULL && map__load(map) >= 0) {
671		if (mapp != NULL)
672			*mapp = map;
673		return map__find_symbol(map, map->map_ip(map, addr));
674	}
675
676	return NULL;
677}
678
679static bool map__contains_symbol(struct map *map, struct symbol *sym)
680{
681	u64 ip = map->unmap_ip(map, sym->start);
682
683	return ip >= map->start && ip < map->end;
684}
685
686struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
687{
688	struct symbol *sym;
689	struct map *pos;
690
691	down_read(&maps->lock);
692
693	maps__for_each_entry(maps, pos) {
694		sym = map__find_symbol_by_name(pos, name);
695
696		if (sym == NULL)
697			continue;
698		if (!map__contains_symbol(pos, sym)) {
699			sym = NULL;
700			continue;
701		}
702		if (mapp != NULL)
703			*mapp = pos;
704		goto out;
705	}
706
707	sym = NULL;
708out:
709	up_read(&maps->lock);
710	return sym;
711}
712
713int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
714{
715	if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
716		if (maps == NULL)
717			return -1;
718		ams->ms.map = maps__find(maps, ams->addr);
719		if (ams->ms.map == NULL)
720			return -1;
721	}
722
723	ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
724	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
725
726	return ams->ms.sym ? 0 : -1;
727}
728
729size_t maps__fprintf(struct maps *maps, FILE *fp)
730{
731	size_t printed = 0;
732	struct map *pos;
733
734	down_read(&maps->lock);
735
736	maps__for_each_entry(maps, pos) {
737		printed += fprintf(fp, "Map:");
738		printed += map__fprintf(pos, fp);
739		if (verbose > 2) {
740			printed += dso__fprintf(pos->dso, fp);
741			printed += fprintf(fp, "--\n");
742		}
743	}
744
745	up_read(&maps->lock);
746
747	return printed;
748}
749
750int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
751{
752	struct rb_root *root;
753	struct rb_node *next, *first;
754	int err = 0;
755
756	down_write(&maps->lock);
757
758	root = &maps->entries;
759
760	/*
761	 * Find first map where end > map->start.
762	 * Same as find_vma() in kernel.
763	 */
764	next = root->rb_node;
765	first = NULL;
766	while (next) {
767		struct map *pos = rb_entry(next, struct map, rb_node);
768
769		if (pos->end > map->start) {
770			first = next;
771			if (pos->start <= map->start)
772				break;
773			next = next->rb_left;
774		} else
775			next = next->rb_right;
776	}
777
778	next = first;
779	while (next) {
780		struct map *pos = rb_entry(next, struct map, rb_node);
781		next = rb_next(&pos->rb_node);
782
783		/*
784		 * Stop if current map starts after map->end.
785		 * Maps are ordered by start: next will not overlap for sure.
786		 */
787		if (pos->start >= map->end)
788			break;
789
790		if (verbose >= 2) {
791
792			if (use_browser) {
793				pr_debug("overlapping maps in %s (disable tui for more info)\n",
794					   map->dso->name);
795			} else {
796				fputs("overlapping maps:\n", fp);
797				map__fprintf(map, fp);
798				map__fprintf(pos, fp);
799			}
800		}
801
802		rb_erase_init(&pos->rb_node, root);
803		/*
804		 * Now check if we need to create new maps for areas not
805		 * overlapped by the new map:
806		 */
807		if (map->start > pos->start) {
808			struct map *before = map__clone(pos);
809
810			if (before == NULL) {
811				err = -ENOMEM;
812				goto put_map;
813			}
814
815			before->end = map->start;
816			__maps__insert(maps, before);
817			if (verbose >= 2 && !use_browser)
818				map__fprintf(before, fp);
819			map__put(before);
820		}
821
822		if (map->end < pos->end) {
823			struct map *after = map__clone(pos);
824
825			if (after == NULL) {
826				err = -ENOMEM;
827				goto put_map;
828			}
829
830			after->start = map->end;
831			after->pgoff += map->end - pos->start;
832			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
833			__maps__insert(maps, after);
834			if (verbose >= 2 && !use_browser)
835				map__fprintf(after, fp);
836			map__put(after);
837		}
838put_map:
839		map__put(pos);
840
841		if (err)
842			goto out;
843	}
844
845	err = 0;
846out:
847	up_write(&maps->lock);
848	return err;
849}
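A worked, compilable example of the ->pgoff fixup above, for illustration only (not part of map.c): when the surviving tail of an old map is re-inserted as "after", its file offset advances by the number of bytes cut off the front, so map_ip() keeps resolving the remaining addresses as before, which is what the assert() verifies.

/* Illustration only -- not part of map.c.  Assumes map_ip(x) = x - start + pgoff. */
#include <assert.h>
#include <stdint.h>

struct toy_map { uint64_t start, end, pgoff; };

static uint64_t toy_map_ip(const struct toy_map *m, uint64_t addr)
{
	return addr - m->start + m->pgoff;
}

int main(void)
{
	struct toy_map pos = { 0x1000, 0x5000, 0x0 };   /* old map */
	struct toy_map map = { 0x2000, 0x3000, 0x0 };   /* new, overlapping map */
	struct toy_map after = pos;                     /* map__clone(pos) */

	after.start  = map.end;                         /* 0x3000 */
	after.pgoff += map.end - pos.start;             /* += 0x2000 */

	/* same assertion as in maps__fixup_overlappings() */
	assert(toy_map_ip(&pos, map.end) == toy_map_ip(&after, map.end));
	/* any address still covered by "after" resolves as before */
	assert(toy_map_ip(&after, 0x4000) == toy_map_ip(&pos, 0x4000));
	return 0;
}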
850
851/*
 852 * XXX This should not really _copy_ the maps, but refcount them.
853 */
854int maps__clone(struct thread *thread, struct maps *parent)
855{
856	struct maps *maps = thread->maps;
857	int err = -ENOMEM;
858	struct map *map;
859
860	down_read(&parent->lock);
861
862	maps__for_each_entry(parent, map) {
863		struct map *new = map__clone(map);
864		if (new == NULL)
865			goto out_unlock;
866
867		err = unwind__prepare_access(maps, new, NULL);
868		if (err)
869			goto out_unlock;
870
871		maps__insert(maps, new);
872		map__put(new);
873	}
874
875	err = 0;
876out_unlock:
877	up_read(&parent->lock);
878	return err;
879}
880
881static void __maps__insert(struct maps *maps, struct map *map)
882{
883	struct rb_node **p = &maps->entries.rb_node;
884	struct rb_node *parent = NULL;
885	const u64 ip = map->start;
886	struct map *m;
887
888	while (*p != NULL) {
889		parent = *p;
890		m = rb_entry(parent, struct map, rb_node);
891		if (ip < m->start)
892			p = &(*p)->rb_left;
893		else
894			p = &(*p)->rb_right;
895	}
896
897	rb_link_node(&map->rb_node, parent, p);
898	rb_insert_color(&map->rb_node, &maps->entries);
899	map__get(map);
900}
901
902struct map *maps__find(struct maps *maps, u64 ip)
903{
904	struct rb_node *p;
905	struct map *m;
906
907	down_read(&maps->lock);
908
909	p = maps->entries.rb_node;
910	while (p != NULL) {
911		m = rb_entry(p, struct map, rb_node);
912		if (ip < m->start)
913			p = p->rb_left;
914		else if (ip >= m->end)
915			p = p->rb_right;
916		else
917			goto out;
918	}
919
920	m = NULL;
921out:
922	up_read(&maps->lock);
923	return m;
924}
925
926struct map *maps__first(struct maps *maps)
927{
928	struct rb_node *first = rb_first(&maps->entries);
929
930	if (first)
931		return rb_entry(first, struct map, rb_node);
932	return NULL;
933}
934
935static struct map *__map__next(struct map *map)
936{
937	struct rb_node *next = rb_next(&map->rb_node);
938
939	if (next)
940		return rb_entry(next, struct map, rb_node);
941	return NULL;
942}
943
944struct map *map__next(struct map *map)
945{
946	return map ? __map__next(map) : NULL;
947}
948
949struct kmap *__map__kmap(struct map *map)
950{
951	if (!map->dso || !map->dso->kernel)
952		return NULL;
953	return (struct kmap *)(map + 1);
954}
955
956struct kmap *map__kmap(struct map *map)
957{
958	struct kmap *kmap = __map__kmap(map);
959
960	if (!kmap)
961		pr_err("Internal error: map__kmap with a non-kernel map\n");
962	return kmap;
963}
964
965struct maps *map__kmaps(struct map *map)
966{
967	struct kmap *kmap = map__kmap(map);
968
969	if (!kmap || !kmap->kmaps) {
970		pr_err("Internal error: map__kmaps with a non-kernel map\n");
971		return NULL;
972	}
973	return kmap->kmaps;
974}