v3.5.6 (tools/perf/util/map.c)
 
  1#include "symbol.h"
 
  2#include <errno.h>
  3#include <inttypes.h>
  4#include <limits.h>
  5#include <stdlib.h>
  6#include <string.h>
  7#include <stdio.h>
  8#include <unistd.h>
  9#include "map.h"
 10
 11const char *map_type__name[MAP__NR_TYPES] = {
 12	[MAP__FUNCTION] = "Functions",
 13	[MAP__VARIABLE] = "Variables",
 14};
 15
 16static inline int is_anon_memory(const char *filename)
 17{
 18	return strcmp(filename, "//anon") == 0;
 19}
 20
 21static inline int is_no_dso_memory(const char *filename)
 22{
 23	return !strcmp(filename, "[stack]") ||
 24	       !strcmp(filename, "[vdso]")  ||
 25	       !strcmp(filename, "[heap]");
 26}
 27
 28void map__init(struct map *self, enum map_type type,
 29	       u64 start, u64 end, u64 pgoff, struct dso *dso)
 30{
 31	self->type     = type;
 32	self->start    = start;
 33	self->end      = end;
 34	self->pgoff    = pgoff;
 35	self->dso      = dso;
 36	self->map_ip   = map__map_ip;
 37	self->unmap_ip = map__unmap_ip;
 38	RB_CLEAR_NODE(&self->rb_node);
 39	self->groups   = NULL;
 40	self->referenced = false;
 41	self->erange_warned = false;
 42}
 43
 44struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
 45		     u64 pgoff, u32 pid, char *filename,
 46		     enum map_type type)
 47{
 48	struct map *self = malloc(sizeof(*self));
 49
 50	if (self != NULL) {
 51		char newfilename[PATH_MAX];
 52		struct dso *dso;
 53		int anon, no_dso;
 54
 55		anon = is_anon_memory(filename);
 56		no_dso = is_no_dso_memory(filename);
 57
 58		if (anon) {
 59			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
 60			filename = newfilename;
 61		}
 62
 63		dso = __dsos__findnew(dsos__list, filename);
 64		if (dso == NULL)
 65			goto out_delete;
 66
 67		map__init(self, type, start, start + len, pgoff, dso);
 68
 69		if (anon || no_dso) {
 70			self->map_ip = self->unmap_ip = identity__map_ip;
 71
 72			/*
 73			 * Set memory without DSO as loaded. All map__find_*
 74			 * functions still return NULL, and we avoid the
 75			 * unnecessary map__load warning.
 76			 */
 77			if (no_dso)
 78				dso__set_loaded(dso, self->type);
 79		}
 80	}
 81	return self;
 82out_delete:
 83	free(self);
 84	return NULL;
 85}
 86
 87void map__delete(struct map *self)
 88{
 89	free(self);
 90}
 91
 92void map__fixup_start(struct map *self)
 93{
 94	struct rb_root *symbols = &self->dso->symbols[self->type];
 95	struct rb_node *nd = rb_first(symbols);
 96	if (nd != NULL) {
 97		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
 98		self->start = sym->start;
 99	}
100}
101
102void map__fixup_end(struct map *self)
103{
104	struct rb_root *symbols = &self->dso->symbols[self->type];
105	struct rb_node *nd = rb_last(symbols);
106	if (nd != NULL) {
107		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
108		self->end = sym->end;
109	}
110}
111
112#define DSO__DELETED "(deleted)"
113
114int map__load(struct map *self, symbol_filter_t filter)
115{
116	const char *name = self->dso->long_name;
117	int nr;
118
119	if (dso__loaded(self->dso, self->type))
120		return 0;
121
122	nr = dso__load(self->dso, self, filter);
123	if (nr < 0) {
124		if (self->dso->has_build_id) {
125			char sbuild_id[BUILD_ID_SIZE * 2 + 1];
126
127			build_id__sprintf(self->dso->build_id,
128					  sizeof(self->dso->build_id),
129					  sbuild_id);
130			pr_warning("%s with build id %s not found",
131				   name, sbuild_id);
132		} else
133			pr_warning("Failed to open %s", name);
134
135		pr_warning(", continuing without symbols\n");
136		return -1;
137	} else if (nr == 0) {
138		const size_t len = strlen(name);
139		const size_t real_len = len - sizeof(DSO__DELETED);
140
141		if (len > sizeof(DSO__DELETED) &&
142		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
143			pr_warning("%.*s was updated (is prelink enabled?). "
144				"Restart the long running apps that use it!\n",
145				   (int)real_len, name);
146		} else {
147			pr_warning("no symbols found in %s, maybe install "
148				   "a debug package?\n", name);
149		}
150
151		return -1;
152	}
153	/*
154	 * Only applies to the kernel, as its symtabs aren't relative like the
155	 * module ones.
156	 */
157	if (self->dso->kernel)
158		map__reloc_vmlinux(self);
159
160	return 0;
161}
162
163struct symbol *map__find_symbol(struct map *self, u64 addr,
164				symbol_filter_t filter)
165{
166	if (map__load(self, filter) < 0)
167		return NULL;
168
169	return dso__find_symbol(self->dso, self->type, addr);
170}
171
172struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
173					symbol_filter_t filter)
174{
175	if (map__load(self, filter) < 0)
176		return NULL;
177
178	if (!dso__sorted_by_name(self->dso, self->type))
179		dso__sort_by_name(self->dso, self->type);
180
181	return dso__find_symbol_by_name(self->dso, self->type, name);
182}
183
184struct map *map__clone(struct map *self)
185{
186	struct map *map = malloc(sizeof(*self));
187
188	if (!map)
189		return NULL;
190
191	memcpy(map, self, sizeof(*self));
192
193	return map;
194}
195
196int map__overlap(struct map *l, struct map *r)
197{
198	if (l->start > r->start) {
199		struct map *t = l;
200		l = r;
201		r = t;
202	}
203
204	if (l->end > r->start)
205		return 1;
206
207	return 0;
208}
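/*
 * A worked example of the overlap test above (illustrative numbers only, not
 * from this file), using the inclusive ->end convention this version follows
 * elsewhere (e.g. "before->end = map->start - 1" further down):
 *
 *	l = [0x1000, 0x1fff], r = [0x1800, 0x2fff]
 *		l->end (0x1fff) > r->start (0x1800)		-> returns 1
 *
 *	l = [0x1000, 0x1fff], r = [0x2000, 0x2fff]
 *		l->end (0x1fff) is not > r->start (0x2000)	-> returns 0
 */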
209
210size_t map__fprintf(struct map *self, FILE *fp)
211{
212	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
213		       self->start, self->end, self->pgoff, self->dso->name);
214}
215
216size_t map__fprintf_dsoname(struct map *map, FILE *fp)
217{
218	const char *dsoname;
219
220	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
221		if (symbol_conf.show_kernel_path && map->dso->long_name)
222			dsoname = map->dso->long_name;
223		else if (map->dso->name)
224			dsoname = map->dso->name;
225	} else
226		dsoname = "[unknown]";
227
228	return fprintf(fp, "%s", dsoname);
229}
230
231/*
232 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
233 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
234 */
235u64 map__rip_2objdump(struct map *map, u64 rip)
236{
237	u64 addr = map->dso->adjust_symbols ?
238			map->unmap_ip(map, rip) :	/* RIP -> IP */
239			rip;
240	return addr;
241}
242
243u64 map__objdump_2ip(struct map *map, u64 addr)
244{
245	u64 ip = map->dso->adjust_symbols ?
246			addr :
247			map->unmap_ip(map, addr);	/* RIP -> IP */
248	return ip;
249}
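/*
 * Note (editorial sketch, not in the original source): the two helpers above
 * are not inverses. Following both branches, composing them always yields the
 * absolute address for a given rip, whether or not dso->adjust_symbols is set:
 *
 *	map__objdump_2ip(map, map__rip_2objdump(map, rip))
 *					== map->unmap_ip(map, rip)
 */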
250
251void map_groups__init(struct map_groups *mg)
252{
253	int i;
254	for (i = 0; i < MAP__NR_TYPES; ++i) {
255		mg->maps[i] = RB_ROOT;
256		INIT_LIST_HEAD(&mg->removed_maps[i]);
257	}
258	mg->machine = NULL;
259}
260
261static void maps__delete(struct rb_root *maps)
262{
263	struct rb_node *next = rb_first(maps);
264
265	while (next) {
266		struct map *pos = rb_entry(next, struct map, rb_node);
267
268		next = rb_next(&pos->rb_node);
269		rb_erase(&pos->rb_node, maps);
270		map__delete(pos);
271	}
272}
273
274static void maps__delete_removed(struct list_head *maps)
275{
276	struct map *pos, *n;
277
278	list_for_each_entry_safe(pos, n, maps, node) {
279		list_del(&pos->node);
280		map__delete(pos);
281	}
282}
283
284void map_groups__exit(struct map_groups *mg)
285{
286	int i;
287
288	for (i = 0; i < MAP__NR_TYPES; ++i) {
289		maps__delete(&mg->maps[i]);
290		maps__delete_removed(&mg->removed_maps[i]);
291	}
292}
293
294void map_groups__flush(struct map_groups *mg)
295{
296	int type;
297
298	for (type = 0; type < MAP__NR_TYPES; type++) {
299		struct rb_root *root = &mg->maps[type];
300		struct rb_node *next = rb_first(root);
301
302		while (next) {
303			struct map *pos = rb_entry(next, struct map, rb_node);
304			next = rb_next(&pos->rb_node);
305			rb_erase(&pos->rb_node, root);
306			/*
307			 * We may have references to this map, for
308			 * instance in some hist_entry instances, so
309			 * just move them to a separate list.
310			 */
311			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
312		}
313	}
314}
315
316struct symbol *map_groups__find_symbol(struct map_groups *mg,
317				       enum map_type type, u64 addr,
318				       struct map **mapp,
319				       symbol_filter_t filter)
320{
321	struct map *map = map_groups__find(mg, type, addr);
322
323	if (map != NULL) {
324		if (mapp != NULL)
325			*mapp = map;
326		return map__find_symbol(map, map->map_ip(map, addr), filter);
327	}
328
329	return NULL;
330}
331
332struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
333					       enum map_type type,
334					       const char *name,
335					       struct map **mapp,
336					       symbol_filter_t filter)
337{
338	struct rb_node *nd;
339
340	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
341		struct map *pos = rb_entry(nd, struct map, rb_node);
342		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
343
344		if (sym == NULL)
345			continue;
346		if (mapp != NULL)
347			*mapp = pos;
348		return sym;
349	}
350
351	return NULL;
352}
353
354size_t __map_groups__fprintf_maps(struct map_groups *mg,
355				  enum map_type type, int verbose, FILE *fp)
356{
357	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
358	struct rb_node *nd;
359
360	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
361		struct map *pos = rb_entry(nd, struct map, rb_node);
362		printed += fprintf(fp, "Map:");
363		printed += map__fprintf(pos, fp);
364		if (verbose > 2) {
365			printed += dso__fprintf(pos->dso, type, fp);
366			printed += fprintf(fp, "--\n");
367		}
368	}
369
370	return printed;
371}
372
373size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
374{
375	size_t printed = 0, i;
376	for (i = 0; i < MAP__NR_TYPES; ++i)
377		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
378	return printed;
379}
380
381static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
382						 enum map_type type,
383						 int verbose, FILE *fp)
384{
385	struct map *pos;
386	size_t printed = 0;
387
388	list_for_each_entry(pos, &mg->removed_maps[type], node) {
389		printed += fprintf(fp, "Map:");
390		printed += map__fprintf(pos, fp);
391		if (verbose > 1) {
392			printed += dso__fprintf(pos->dso, type, fp);
393			printed += fprintf(fp, "--\n");
394		}
395	}
396	return printed;
397}
398
399static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
400					       int verbose, FILE *fp)
401{
402	size_t printed = 0, i;
403	for (i = 0; i < MAP__NR_TYPES; ++i)
404		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
405	return printed;
406}
407
408size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
409{
410	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
411	printed += fprintf(fp, "Removed maps:\n");
412	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
413}
414
415int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
416				   int verbose, FILE *fp)
417{
418	struct rb_root *root = &mg->maps[map->type];
419	struct rb_node *next = rb_first(root);
420	int err = 0;
421
422	while (next) {
423		struct map *pos = rb_entry(next, struct map, rb_node);
424		next = rb_next(&pos->rb_node);
425
426		if (!map__overlap(pos, map))
427			continue;
428
429		if (verbose >= 2) {
430			fputs("overlapping maps:\n", fp);
431			map__fprintf(map, fp);
432			map__fprintf(pos, fp);
433		}
434
435		rb_erase(&pos->rb_node, root);
436		/*
437		 * Now check if we need to create new maps for areas not
438		 * overlapped by the new map:
439		 */
440		if (map->start > pos->start) {
441			struct map *before = map__clone(pos);
442
443			if (before == NULL) {
444				err = -ENOMEM;
445				goto move_map;
446			}
447
448			before->end = map->start - 1;
449			map_groups__insert(mg, before);
450			if (verbose >= 2)
451				map__fprintf(before, fp);
452		}
453
454		if (map->end < pos->end) {
455			struct map *after = map__clone(pos);
456
457			if (after == NULL) {
458				err = -ENOMEM;
459				goto move_map;
460			}
461
462			after->start = map->end + 1;
463			map_groups__insert(mg, after);
464			if (verbose >= 2)
465				map__fprintf(after, fp);
466		}
467move_map:
468		/*
469		 * If we have references, just move them to a separate list.
470		 */
471		if (pos->referenced)
472			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
473		else
474			map__delete(pos);
475
476		if (err)
477			return err;
478	}
479
480	return 0;
481}
482
483/*
 484 * XXX This should not really _copy_ the maps, but refcount them.
485 */
486int map_groups__clone(struct map_groups *mg,
487		      struct map_groups *parent, enum map_type type)
488{
489	struct rb_node *nd;
490	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
491		struct map *map = rb_entry(nd, struct map, rb_node);
492		struct map *new = map__clone(map);
493		if (new == NULL)
494			return -ENOMEM;
495		map_groups__insert(mg, new);
496	}
497	return 0;
498}
499
500static u64 map__reloc_map_ip(struct map *map, u64 ip)
501{
502	return ip + (s64)map->pgoff;
503}
504
505static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
506{
507	return ip - (s64)map->pgoff;
508}
509
510void map__reloc_vmlinux(struct map *self)
511{
512	struct kmap *kmap = map__kmap(self);
513	s64 reloc;
514
515	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
516		return;
517
518	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
519		 kmap->ref_reloc_sym->addr);
520
521	if (!reloc)
522		return;
523
524	self->map_ip   = map__reloc_map_ip;
525	self->unmap_ip = map__reloc_unmap_ip;
526	self->pgoff    = reloc;
527}
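/*
 * A quick numeric sketch of the relocation above (made-up addresses, not from
 * this file): if ref_reloc_sym->unrelocated_addr is 0xffffffff81000000 and
 * ref_reloc_sym->addr is 0xffffffff81200000, then reloc = -0x200000 and the
 * hooks installed above shift between the two address views:
 *
 *	self->map_ip(self, 0xffffffff81200000)   == 0xffffffff81000000
 *	self->unmap_ip(self, 0xffffffff81000000) == 0xffffffff81200000
 */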
528
529void maps__insert(struct rb_root *maps, struct map *map)
530{
531	struct rb_node **p = &maps->rb_node;
532	struct rb_node *parent = NULL;
533	const u64 ip = map->start;
534	struct map *m;
535
536	while (*p != NULL) {
537		parent = *p;
538		m = rb_entry(parent, struct map, rb_node);
539		if (ip < m->start)
540			p = &(*p)->rb_left;
541		else
542			p = &(*p)->rb_right;
543	}
544
545	rb_link_node(&map->rb_node, parent, p);
546	rb_insert_color(&map->rb_node, maps);
547}
548
549void maps__remove(struct rb_root *self, struct map *map)
550{
551	rb_erase(&map->rb_node, self);
552}
553
554struct map *maps__find(struct rb_root *maps, u64 ip)
555{
556	struct rb_node **p = &maps->rb_node;
557	struct rb_node *parent = NULL;
558	struct map *m;
559
560	while (*p != NULL) {
561		parent = *p;
562		m = rb_entry(parent, struct map, rb_node);
563		if (ip < m->start)
564			p = &(*p)->rb_left;
565		else if (ip > m->end)
566			p = &(*p)->rb_right;
567		else
568			return m;
569	}
570
571	return NULL;
572}
573
574int machine__init(struct machine *self, const char *root_dir, pid_t pid)
575{
576	map_groups__init(&self->kmaps);
577	RB_CLEAR_NODE(&self->rb_node);
578	INIT_LIST_HEAD(&self->user_dsos);
579	INIT_LIST_HEAD(&self->kernel_dsos);
580
581	self->threads = RB_ROOT;
582	INIT_LIST_HEAD(&self->dead_threads);
583	self->last_match = NULL;
584
585	self->kmaps.machine = self;
586	self->pid	    = pid;
587	self->root_dir      = strdup(root_dir);
588	return self->root_dir == NULL ? -ENOMEM : 0;
589}
590
591static void dsos__delete(struct list_head *self)
592{
593	struct dso *pos, *n;
594
595	list_for_each_entry_safe(pos, n, self, node) {
596		list_del(&pos->node);
597		dso__delete(pos);
598	}
599}
600
601void machine__exit(struct machine *self)
602{
603	map_groups__exit(&self->kmaps);
604	dsos__delete(&self->user_dsos);
605	dsos__delete(&self->kernel_dsos);
606	free(self->root_dir);
607	self->root_dir = NULL;
608}
609
610void machine__delete(struct machine *self)
611{
612	machine__exit(self);
613	free(self);
614}
615
616struct machine *machines__add(struct rb_root *self, pid_t pid,
617			      const char *root_dir)
618{
619	struct rb_node **p = &self->rb_node;
620	struct rb_node *parent = NULL;
621	struct machine *pos, *machine = malloc(sizeof(*machine));
622
623	if (!machine)
624		return NULL;
625
626	if (machine__init(machine, root_dir, pid) != 0) {
627		free(machine);
628		return NULL;
629	}
630
631	while (*p != NULL) {
632		parent = *p;
633		pos = rb_entry(parent, struct machine, rb_node);
634		if (pid < pos->pid)
635			p = &(*p)->rb_left;
636		else
637			p = &(*p)->rb_right;
638	}
639
640	rb_link_node(&machine->rb_node, parent, p);
641	rb_insert_color(&machine->rb_node, self);
642
643	return machine;
644}
645
646struct machine *machines__find(struct rb_root *self, pid_t pid)
647{
648	struct rb_node **p = &self->rb_node;
649	struct rb_node *parent = NULL;
650	struct machine *machine;
651	struct machine *default_machine = NULL;
652
653	while (*p != NULL) {
654		parent = *p;
655		machine = rb_entry(parent, struct machine, rb_node);
656		if (pid < machine->pid)
657			p = &(*p)->rb_left;
658		else if (pid > machine->pid)
659			p = &(*p)->rb_right;
660		else
661			return machine;
662		if (!machine->pid)
663			default_machine = machine;
664	}
665
666	return default_machine;
667}
668
669struct machine *machines__findnew(struct rb_root *self, pid_t pid)
670{
671	char path[PATH_MAX];
672	const char *root_dir = "";
673	struct machine *machine = machines__find(self, pid);
674
675	if (machine && (machine->pid == pid))
676		goto out;
677
678	if ((pid != HOST_KERNEL_ID) &&
679	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
680	    (symbol_conf.guestmount)) {
681		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
682		if (access(path, R_OK)) {
683			pr_err("Can't access file %s\n", path);
684			machine = NULL;
685			goto out;
686		}
687		root_dir = path;
688	}
689
690	machine = machines__add(self, pid, root_dir);
691
692out:
693	return machine;
694}
695
696void machines__process(struct rb_root *self, machine__process_t process, void *data)
697{
698	struct rb_node *nd;
699
700	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
701		struct machine *pos = rb_entry(nd, struct machine, rb_node);
702		process(pos, data);
703	}
704}
705
706char *machine__mmap_name(struct machine *self, char *bf, size_t size)
707{
708	if (machine__is_host(self))
709		snprintf(bf, size, "[%s]", "kernel.kallsyms");
710	else if (machine__is_default_guest(self))
711		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
712	else
713		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
714
715	return bf;
716}
v5.14.15 (tools/perf/util/map.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include "symbol.h"
  3#include <assert.h>
  4#include <errno.h>
  5#include <inttypes.h>
  6#include <limits.h>
  7#include <stdlib.h>
  8#include <string.h>
  9#include <stdio.h>
 10#include <unistd.h>
 11#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
 12#include "dso.h"
 13#include "map.h"
 14#include "map_symbol.h"
 15#include "thread.h"
 16#include "vdso.h"
 17#include "build-id.h"
 18#include "debug.h"
 19#include "machine.h"
 20#include <linux/string.h>
 21#include <linux/zalloc.h>
 22#include "srcline.h"
 23#include "namespaces.h"
 24#include "unwind.h"
 25#include "srccode.h"
 26#include "ui/ui.h"
 27
 28static void __maps__insert(struct maps *maps, struct map *map);
 29
 30static inline int is_android_lib(const char *filename)
 31{
 32	return strstarts(filename, "/data/app-lib/") ||
 33	       strstarts(filename, "/system/lib/");
 34}
 35
 36static inline bool replace_android_lib(const char *filename, char *newfilename)
 37{
 38	const char *libname;
 39	char *app_abi;
 40	size_t app_abi_length, new_length;
 41	size_t lib_length = 0;
 42
 43	libname  = strrchr(filename, '/');
 44	if (libname)
 45		lib_length = strlen(libname);
 46
 47	app_abi = getenv("APP_ABI");
 48	if (!app_abi)
 49		return false;
 50
 51	app_abi_length = strlen(app_abi);
 52
 53	if (strstarts(filename, "/data/app-lib/")) {
 54		char *apk_path;
 55
 56		if (!app_abi_length)
 57			return false;
 58
 59		new_length = 7 + app_abi_length + lib_length;
 60
 61		apk_path = getenv("APK_PATH");
 62		if (apk_path) {
 63			new_length += strlen(apk_path) + 1;
 64			if (new_length > PATH_MAX)
 65				return false;
 66			snprintf(newfilename, new_length,
 67				 "%s/libs/%s/%s", apk_path, app_abi, libname);
 68		} else {
 69			if (new_length > PATH_MAX)
 70				return false;
 71			snprintf(newfilename, new_length,
 72				 "libs/%s/%s", app_abi, libname);
 73		}
 74		return true;
 75	}
 76
 77	if (strstarts(filename, "/system/lib/")) {
 78		char *ndk, *app;
 79		const char *arch;
 80		int ndk_length, app_length;
 81
 82		ndk = getenv("NDK_ROOT");
 83		app = getenv("APP_PLATFORM");
 84
 85		if (!(ndk && app))
 86			return false;
 87
 88		ndk_length = strlen(ndk);
 89		app_length = strlen(app);
 90
 91		if (!(ndk_length && app_length && app_abi_length))
 92			return false;
 93
 94		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
 95		       !strncmp(app_abi, "mips", 4) ? "mips" :
 96		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
 97
 98		if (!arch)
 99			return false;
100
101		new_length = 27 + ndk_length +
102			     app_length + lib_length
103			   + strlen(arch);
104
105		if (new_length > PATH_MAX)
106			return false;
107		snprintf(newfilename, new_length,
108			"%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
109			ndk_length, ndk, app_length, app, arch, libname);
110
111		return true;
112	}
113	return false;
114}
115
116void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
117{
118	map->start    = start;
119	map->end      = end;
120	map->pgoff    = pgoff;
121	map->reloc    = 0;
122	map->dso      = dso__get(dso);
123	map->map_ip   = map__map_ip;
124	map->unmap_ip = map__unmap_ip;
125	RB_CLEAR_NODE(&map->rb_node);
126	map->erange_warned = false;
127	refcount_set(&map->refcnt, 1);
128}
129
130struct map *map__new(struct machine *machine, u64 start, u64 len,
131		     u64 pgoff, struct dso_id *id,
132		     u32 prot, u32 flags, struct build_id *bid,
133		     char *filename, struct thread *thread)
134{
135	struct map *map = malloc(sizeof(*map));
136	struct nsinfo *nsi = NULL;
137	struct nsinfo *nnsi;
138
139	if (map != NULL) {
140		char newfilename[PATH_MAX];
141		struct dso *dso;
142		int anon, no_dso, vdso, android;
143
144		android = is_android_lib(filename);
145		anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
146		vdso = is_vdso_map(filename);
147		no_dso = is_no_dso_memory(filename);
148		map->prot = prot;
149		map->flags = flags;
150		nsi = nsinfo__get(thread->nsinfo);
151
152		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
153			snprintf(newfilename, sizeof(newfilename),
154				 "/tmp/perf-%d.map", nsi->pid);
155			filename = newfilename;
156		}
157
158		if (android) {
159			if (replace_android_lib(filename, newfilename))
160				filename = newfilename;
161		}
162
163		if (vdso) {
164			/* The vdso maps are always on the host and not the
165			 * container.  Ensure that we don't use setns to look
166			 * them up.
167			 */
168			nnsi = nsinfo__copy(nsi);
169			if (nnsi) {
170				nsinfo__put(nsi);
171				nnsi->need_setns = false;
172				nsi = nnsi;
173			}
174			pgoff = 0;
175			dso = machine__findnew_vdso(machine, thread);
176		} else
177			dso = machine__findnew_dso_id(machine, filename, id);
178
179		if (dso == NULL)
180			goto out_delete;
181
182		map__init(map, start, start + len, pgoff, dso);
183
184		if (anon || no_dso) {
185			map->map_ip = map->unmap_ip = identity__map_ip;
186
187			/*
188			 * Set memory without DSO as loaded. All map__find_*
189			 * functions still return NULL, and we avoid the
190			 * unnecessary map__load warning.
191			 */
192			if (!(prot & PROT_EXEC))
193				dso__set_loaded(dso);
194		}
195		dso->nsinfo = nsi;
196
197		if (build_id__is_defined(bid))
198			dso__set_build_id(dso, bid);
199
200		dso__put(dso);
201	}
202	return map;
203out_delete:
204	nsinfo__put(nsi);
205	free(map);
206	return NULL;
207}
208
209/*
210 * Constructor variant for modules (where we know from /proc/modules where
211 * they are loaded) and for vmlinux, where only after we load all the
212 * symbols we'll know where it starts and ends.
213 */
214struct map *map__new2(u64 start, struct dso *dso)
215{
216	struct map *map = calloc(1, (sizeof(*map) +
217				     (dso->kernel ? sizeof(struct kmap) : 0)));
218	if (map != NULL) {
219		/*
220		 * ->end will be filled after we load all the symbols
221		 */
222		map__init(map, start, 0, 0, dso);
223	}
224
225	return map;
226}
227
228bool __map__is_kernel(const struct map *map)
229{
230	if (!map->dso->kernel)
231		return false;
232	return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
233}
234
235bool __map__is_extra_kernel_map(const struct map *map)
236{
237	struct kmap *kmap = __map__kmap((struct map *)map);
238
239	return kmap && kmap->name[0];
240}
241
242bool __map__is_bpf_prog(const struct map *map)
243{
244	const char *name;
245
246	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
247		return true;
248
249	/*
250	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
251	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
252	 * guess the type based on name.
253	 */
254	name = map->dso->short_name;
255	return name && (strstr(name, "bpf_prog_") == name);
256}
257
258bool __map__is_bpf_image(const struct map *map)
259{
260	const char *name;
261
262	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
263		return true;
264
265	/*
266	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
267	 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
268	 * guess the type based on name.
269	 */
270	name = map->dso->short_name;
271	return name && is_bpf_image(name);
272}
273
274bool __map__is_ool(const struct map *map)
275{
276	return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
277}
278
279bool map__has_symbols(const struct map *map)
280{
281	return dso__has_symbols(map->dso);
282}
283
284static void map__exit(struct map *map)
285{
286	BUG_ON(refcount_read(&map->refcnt) != 0);
287	dso__zput(map->dso);
288}
289
290void map__delete(struct map *map)
291{
292	map__exit(map);
293	free(map);
294}
295
296void map__put(struct map *map)
297{
298	if (map && refcount_dec_and_test(&map->refcnt))
299		map__delete(map);
300}
301
302void map__fixup_start(struct map *map)
303{
304	struct rb_root_cached *symbols = &map->dso->symbols;
305	struct rb_node *nd = rb_first_cached(symbols);
306	if (nd != NULL) {
307		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
308		map->start = sym->start;
309	}
310}
311
312void map__fixup_end(struct map *map)
313{
314	struct rb_root_cached *symbols = &map->dso->symbols;
315	struct rb_node *nd = rb_last(&symbols->rb_root);
316	if (nd != NULL) {
317		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
318		map->end = sym->end;
319	}
320}
321
322#define DSO__DELETED "(deleted)"
323
324int map__load(struct map *map)
325{
326	const char *name = map->dso->long_name;
327	int nr;
328
329	if (dso__loaded(map->dso))
330		return 0;
331
332	nr = dso__load(map->dso, map);
333	if (nr < 0) {
334		if (map->dso->has_build_id) {
335			char sbuild_id[SBUILD_ID_SIZE];
336
337			build_id__sprintf(&map->dso->bid, sbuild_id);
338			pr_debug("%s with build id %s not found", name, sbuild_id);
339		} else
340			pr_debug("Failed to open %s", name);
341
342		pr_debug(", continuing without symbols\n");
343		return -1;
344	} else if (nr == 0) {
345#ifdef HAVE_LIBELF_SUPPORT
346		const size_t len = strlen(name);
347		const size_t real_len = len - sizeof(DSO__DELETED);
348
349		if (len > sizeof(DSO__DELETED) &&
350		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
351			pr_debug("%.*s was updated (is prelink enabled?). "
352				"Restart the long running apps that use it!\n",
353				   (int)real_len, name);
354		} else {
355			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
356		}
357#endif
358		return -1;
359	}
360
361	return 0;
362}
363
364struct symbol *map__find_symbol(struct map *map, u64 addr)
365{
366	if (map__load(map) < 0)
367		return NULL;
368
369	return dso__find_symbol(map->dso, addr);
370}
371
372struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
373{
374	if (map__load(map) < 0)
375		return NULL;
376
377	if (!dso__sorted_by_name(map->dso))
378		dso__sort_by_name(map->dso);
379
380	return dso__find_symbol_by_name(map->dso, name);
381}
382
383struct map *map__clone(struct map *from)
384{
385	size_t size = sizeof(struct map);
386	struct map *map;
387
388	if (from->dso && from->dso->kernel)
389		size += sizeof(struct kmap);
390
391	map = memdup(from, size);
392	if (map != NULL) {
393		refcount_set(&map->refcnt, 1);
394		RB_CLEAR_NODE(&map->rb_node);
395		dso__get(map->dso);
396	}
397
398	return map;
399}
400
401size_t map__fprintf(struct map *map, FILE *fp)
402{
403	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
404		       map->start, map->end, map->pgoff, map->dso->name);
405}
406
407size_t map__fprintf_dsoname(struct map *map, FILE *fp)
408{
409	char buf[symbol_conf.pad_output_len_dso + 1];
410	const char *dsoname = "[unknown]";
411
412	if (map && map->dso) {
413		if (symbol_conf.show_kernel_path && map->dso->long_name)
414			dsoname = map->dso->long_name;
415		else
416			dsoname = map->dso->name;
417	}
418
419	if (symbol_conf.pad_output_len_dso) {
420		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
421		dsoname = buf;
422	}
423
424	return fprintf(fp, "%s", dsoname);
425}
426
427char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
428{
429	if (map == NULL)
430		return SRCLINE_UNKNOWN;
431	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
432}
433
434int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
435			 FILE *fp)
436{
437	int ret = 0;
438
439	if (map && map->dso) {
440		char *srcline = map__srcline(map, addr, NULL);
441		if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
442			ret = fprintf(fp, "%s%s", prefix, srcline);
443		free_srcline(srcline);
444	}
445	return ret;
446}
447
448void srccode_state_free(struct srccode_state *state)
449{
450	zfree(&state->srcfile);
451	state->line = 0;
452}
453
454/**
455 * map__rip_2objdump - convert symbol start address to objdump address.
456 * @map: memory map
457 * @rip: symbol start address
458 *
459 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
460 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
461 * relative to section start.
462 *
463 * Return: Address suitable for passing to "objdump --start-address="
464 */
465u64 map__rip_2objdump(struct map *map, u64 rip)
466{
467	struct kmap *kmap = __map__kmap(map);
468
469	/*
470	 * vmlinux does not have program headers for PTI entry trampolines and
471	 * kcore may not either. However the trampoline object code is on the
472	 * main kernel map, so just use that instead.
473	 */
474	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
475		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
476
477		if (kernel_map)
478			map = kernel_map;
479	}
480
481	if (!map->dso->adjust_symbols)
482		return rip;
483
484	if (map->dso->rel)
485		return rip - map->pgoff;
486
487	/*
 488	 * kernel modules also have DSO_SPACE__USER in dso->kernel,
489	 * but all kernel modules are ET_REL, so won't get here.
490	 */
491	if (map->dso->kernel == DSO_SPACE__USER)
492		return rip + map->dso->text_offset;
493
494	return map->unmap_ip(map, rip) - map->reloc;
495}
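/*
 * Editorial sketch (not part of this file): a typical use is computing the
 * objdump address range for one symbol before disassembling it. Assuming a
 * symbol "sym" that lives on "map":
 *
 *	u64 start = map__rip_2objdump(map, sym->start);
 *	u64 end   = map__rip_2objdump(map, sym->end);
 *	// objdump -d --start-address=0x<start> --stop-address=0x<end> <dso>
 */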
496
497/**
498 * map__objdump_2mem - convert objdump address to a memory address.
499 * @map: memory map
500 * @ip: objdump address
501 *
502 * Closely related to map__rip_2objdump(), this function takes an address from
503 * objdump and converts it to a memory address.  Note this assumes that @map
504 * contains the address.  To be sure the result is valid, check it forwards
505 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
506 *
507 * Return: Memory address.
508 */
509u64 map__objdump_2mem(struct map *map, u64 ip)
510{
511	if (!map->dso->adjust_symbols)
512		return map->unmap_ip(map, ip);
513
514	if (map->dso->rel)
515		return map->unmap_ip(map, ip + map->pgoff);
516
517	/*
 518	 * kernel modules also have DSO_SPACE__USER in dso->kernel,
519	 * but all kernel modules are ET_REL, so won't get here.
520	 */
521	if (map->dso->kernel == DSO_SPACE__USER)
522		return map->unmap_ip(map, ip - map->dso->text_offset);
523
524	return ip + map->reloc;
525}
526
527void maps__init(struct maps *maps, struct machine *machine)
528{
529	maps->entries = RB_ROOT;
530	init_rwsem(&maps->lock);
531	maps->machine = machine;
532	maps->last_search_by_name = NULL;
533	maps->nr_maps = 0;
534	maps->maps_by_name = NULL;
535	refcount_set(&maps->refcnt, 1);
536}
537
538static void __maps__free_maps_by_name(struct maps *maps)
539{
540	/*
 541	 * Free the by-name array so the next search by name falls back to the rbtree
542	 */
543	zfree(&maps->maps_by_name);
544	maps->nr_maps_allocated = 0;
545}
546
547void maps__insert(struct maps *maps, struct map *map)
548{
549	down_write(&maps->lock);
550	__maps__insert(maps, map);
551	++maps->nr_maps;
552
553	if (map->dso && map->dso->kernel) {
554		struct kmap *kmap = map__kmap(map);
555
556		if (kmap)
557			kmap->kmaps = maps;
558		else
559			pr_err("Internal error: kernel dso with non kernel map\n");
560	}
561
562
563	/*
564	 * If we already performed some search by name, then we need to add the just
565	 * inserted map and resort.
566	 */
567	if (maps->maps_by_name) {
568		if (maps->nr_maps > maps->nr_maps_allocated) {
569			int nr_allocate = maps->nr_maps * 2;
570			struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
571
572			if (maps_by_name == NULL) {
573				__maps__free_maps_by_name(maps);
574				up_write(&maps->lock);
575				return;
576			}
577
578			maps->maps_by_name = maps_by_name;
579			maps->nr_maps_allocated = nr_allocate;
580		}
581		maps->maps_by_name[maps->nr_maps - 1] = map;
582		__maps__sort_by_name(maps);
583	}
584	up_write(&maps->lock);
585}
586
587static void __maps__remove(struct maps *maps, struct map *map)
588{
589	rb_erase_init(&map->rb_node, &maps->entries);
590	map__put(map);
591}
592
593void maps__remove(struct maps *maps, struct map *map)
594{
595	down_write(&maps->lock);
596	if (maps->last_search_by_name == map)
597		maps->last_search_by_name = NULL;
598
599	__maps__remove(maps, map);
600	--maps->nr_maps;
601	if (maps->maps_by_name)
602		__maps__free_maps_by_name(maps);
603	up_write(&maps->lock);
604}
605
606static void __maps__purge(struct maps *maps)
607{
608	struct map *pos, *next;
609
610	maps__for_each_entry_safe(maps, pos, next) {
611		rb_erase_init(&pos->rb_node,  &maps->entries);
612		map__put(pos);
613	}
614}
615
616void maps__exit(struct maps *maps)
617{
618	down_write(&maps->lock);
619	__maps__purge(maps);
620	up_write(&maps->lock);
621}
622
623bool maps__empty(struct maps *maps)
624{
625	return !maps__first(maps);
626}
627
628struct maps *maps__new(struct machine *machine)
629{
630	struct maps *maps = zalloc(sizeof(*maps));
631
632	if (maps != NULL)
633		maps__init(maps, machine);
634
635	return maps;
636}
637
638void maps__delete(struct maps *maps)
639{
640	maps__exit(maps);
641	unwind__finish_access(maps);
642	free(maps);
643}
644
645void maps__put(struct maps *maps)
646{
647	if (maps && refcount_dec_and_test(&maps->refcnt))
648		maps__delete(maps);
649}
650
651struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
652{
653	struct map *map = maps__find(maps, addr);
654
655	/* Ensure map is loaded before using map->map_ip */
656	if (map != NULL && map__load(map) >= 0) {
657		if (mapp != NULL)
658			*mapp = map;
659		return map__find_symbol(map, map->map_ip(map, addr));
660	}
661
662	return NULL;
663}
664
665static bool map__contains_symbol(struct map *map, struct symbol *sym)
666{
667	u64 ip = map->unmap_ip(map, sym->start);
668
669	return ip >= map->start && ip < map->end;
670}
671
672struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
673{
674	struct symbol *sym;
675	struct map *pos;
676
677	down_read(&maps->lock);
678
679	maps__for_each_entry(maps, pos) {
680		sym = map__find_symbol_by_name(pos, name);
681
682		if (sym == NULL)
683			continue;
684		if (!map__contains_symbol(pos, sym)) {
685			sym = NULL;
686			continue;
687		}
688		if (mapp != NULL)
689			*mapp = pos;
690		goto out;
691	}
692
693	sym = NULL;
694out:
695	up_read(&maps->lock);
696	return sym;
697}
698
699int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
700{
701	if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
702		if (maps == NULL)
703			return -1;
704		ams->ms.map = maps__find(maps, ams->addr);
705		if (ams->ms.map == NULL)
706			return -1;
707	}
708
709	ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
710	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
711
712	return ams->ms.sym ? 0 : -1;
713}
714
715size_t maps__fprintf(struct maps *maps, FILE *fp)
716{
717	size_t printed = 0;
718	struct map *pos;
719
720	down_read(&maps->lock);
721
722	maps__for_each_entry(maps, pos) {
723		printed += fprintf(fp, "Map:");
724		printed += map__fprintf(pos, fp);
725		if (verbose > 2) {
726			printed += dso__fprintf(pos->dso, fp);
727			printed += fprintf(fp, "--\n");
728		}
729	}
730
731	up_read(&maps->lock);
732
733	return printed;
734}
735
736int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
737{
738	struct rb_root *root;
739	struct rb_node *next, *first;
740	int err = 0;
741
742	down_write(&maps->lock);
743
744	root = &maps->entries;
745
746	/*
747	 * Find first map where end > map->start.
748	 * Same as find_vma() in kernel.
749	 */
750	next = root->rb_node;
751	first = NULL;
752	while (next) {
753		struct map *pos = rb_entry(next, struct map, rb_node);
754
755		if (pos->end > map->start) {
756			first = next;
757			if (pos->start <= map->start)
758				break;
759			next = next->rb_left;
760		} else
761			next = next->rb_right;
762	}
763
764	next = first;
765	while (next) {
766		struct map *pos = rb_entry(next, struct map, rb_node);
767		next = rb_next(&pos->rb_node);
768
769		/*
770		 * Stop if current map starts after map->end.
771		 * Maps are ordered by start: next will not overlap for sure.
772		 */
773		if (pos->start >= map->end)
774			break;
775
776		if (verbose >= 2) {
777
778			if (use_browser) {
779				pr_debug("overlapping maps in %s (disable tui for more info)\n",
780					   map->dso->name);
781			} else {
782				fputs("overlapping maps:\n", fp);
783				map__fprintf(map, fp);
784				map__fprintf(pos, fp);
785			}
786		}
787
788		rb_erase_init(&pos->rb_node, root);
789		/*
790		 * Now check if we need to create new maps for areas not
791		 * overlapped by the new map:
792		 */
793		if (map->start > pos->start) {
794			struct map *before = map__clone(pos);
795
796			if (before == NULL) {
797				err = -ENOMEM;
798				goto put_map;
799			}
800
801			before->end = map->start;
802			__maps__insert(maps, before);
803			if (verbose >= 2 && !use_browser)
804				map__fprintf(before, fp);
805			map__put(before);
806		}
807
808		if (map->end < pos->end) {
809			struct map *after = map__clone(pos);
810
811			if (after == NULL) {
812				err = -ENOMEM;
813				goto put_map;
814			}
815
816			after->start = map->end;
817			after->pgoff += map->end - pos->start;
818			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
819			__maps__insert(maps, after);
820			if (verbose >= 2 && !use_browser)
821				map__fprintf(after, fp);
822			map__put(after);
823		}
824put_map:
825		map__put(pos);
826
827		if (err)
828			goto out;
829	}
830
831	err = 0;
832out:
833	up_write(&maps->lock);
834	return err;
835}
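/*
 * Editorial sketch of the splitting above (made-up ranges, not from this
 * file): if an existing map "pos" covers [0x1000, 0x4000) with pgoff 0 and
 * the incoming "map" covers [0x2000, 0x3000), pos is replaced by two clones:
 *
 *	before: [0x1000, 0x2000), pgoff 0
 *	after:  [0x3000, 0x4000), pgoff 0x2000	(pos->pgoff + map->end - pos->start)
 *
 * so addresses outside the new map still resolve through the same dso at the
 * same file offsets.
 */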
836
837/*
 838 * XXX This should not really _copy_ the maps, but refcount them.
839 */
840int maps__clone(struct thread *thread, struct maps *parent)
841{
842	struct maps *maps = thread->maps;
843	int err;
844	struct map *map;
845
846	down_read(&parent->lock);
847
848	maps__for_each_entry(parent, map) {
849		struct map *new = map__clone(map);
850
851		if (new == NULL) {
852			err = -ENOMEM;
853			goto out_unlock;
854		}
855
856		err = unwind__prepare_access(maps, new, NULL);
857		if (err)
858			goto out_unlock;
859
860		maps__insert(maps, new);
861		map__put(new);
862	}
863
864	err = 0;
865out_unlock:
866	up_read(&parent->lock);
867	return err;
868}
869
870static void __maps__insert(struct maps *maps, struct map *map)
871{
872	struct rb_node **p = &maps->entries.rb_node;
873	struct rb_node *parent = NULL;
874	const u64 ip = map->start;
875	struct map *m;
876
877	while (*p != NULL) {
878		parent = *p;
879		m = rb_entry(parent, struct map, rb_node);
880		if (ip < m->start)
881			p = &(*p)->rb_left;
882		else
883			p = &(*p)->rb_right;
884	}
885
886	rb_link_node(&map->rb_node, parent, p);
887	rb_insert_color(&map->rb_node, &maps->entries);
888	map__get(map);
889}
890
891struct map *maps__find(struct maps *maps, u64 ip)
892{
893	struct rb_node *p;
894	struct map *m;
895
896	down_read(&maps->lock);
897
898	p = maps->entries.rb_node;
899	while (p != NULL) {
900		m = rb_entry(p, struct map, rb_node);
901		if (ip < m->start)
902			p = p->rb_left;
903		else if (ip >= m->end)
904			p = p->rb_right;
905		else
906			goto out;
907	}
908
909	m = NULL;
910out:
911	up_read(&maps->lock);
912	return m;
913}
914
915struct map *maps__first(struct maps *maps)
916{
917	struct rb_node *first = rb_first(&maps->entries);
918
919	if (first)
920		return rb_entry(first, struct map, rb_node);
921	return NULL;
922}
923
924static struct map *__map__next(struct map *map)
925{
926	struct rb_node *next = rb_next(&map->rb_node);
927
928	if (next)
929		return rb_entry(next, struct map, rb_node);
930	return NULL;
931}
932
933struct map *map__next(struct map *map)
934{
935	return map ? __map__next(map) : NULL;
936}
937
938struct kmap *__map__kmap(struct map *map)
939{
940	if (!map->dso || !map->dso->kernel)
941		return NULL;
942	return (struct kmap *)(map + 1);
943}
944
945struct kmap *map__kmap(struct map *map)
946{
947	struct kmap *kmap = __map__kmap(map);
948
949	if (!kmap)
950		pr_err("Internal error: map__kmap with a non-kernel map\n");
951	return kmap;
952}
953
954struct maps *map__kmaps(struct map *map)
955{
956	struct kmap *kmap = map__kmap(map);
957
958	if (!kmap || !kmap->kmaps) {
959		pr_err("Internal error: map__kmaps with a non-kernel map\n");
960		return NULL;
961	}
962	return kmap->kmaps;
963}