// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

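/*
 * The entries rb-tree does not hold struct map directly: each map is wrapped
 * in a map_rb_node, and the tree is kept ordered by the map's start address.
 */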
struct map_rb_node {
	struct rb_node rb_node;
	struct map *map;
};

#define maps__for_each_entry(maps, map) \
	for (map = maps__first(maps); map; map = map_rb_node__next(map))

#define maps__for_each_entry_safe(maps, map, next) \
	for (map = maps__first(maps), next = map_rb_node__next(map); map; \
	     map = next, next = map_rb_node__next(map))

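/*
 * Accessors for the fields of the underlying structure; RC_CHK_ACCESS()
 * unwraps the reference-count-checked maps handle.
 */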
static struct rb_root *maps__entries(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->entries;
}

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

static struct map **maps__maps_by_name(struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}

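/* Walk the tree looking for the rb-node that wraps this particular map. */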
static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
			return rb_node;
	}
	return NULL;
}

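/* Initialize a freshly allocated maps: empty tree, one reference, no by-name array yet. */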
static void maps__init(struct maps *maps, struct machine *machine)
{
	refcount_set(maps__refcnt(maps), 1);
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->entries = RB_ROOT;
	RC_CHK_ACCESS(maps)->machine = machine;
	RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free everything so that the array is rebuilt from the rbtree on the
	 * next search by name.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}

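/*
 * Insert a new rb-node keyed by the map's start address, taking a reference
 * on the map. The caller must hold the maps lock for writing.
 */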
static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}

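/*
 * Locked insert that also updates nr_maps, the kmaps back pointer for kernel
 * dsos and, if it already exists, the sorted by-name array.
 */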
int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++RC_CHK_ACCESS(maps)->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}

	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
			RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}

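/*
 * __maps__remove() unlinks the rb-node and drops the reference it held on the
 * map; maps__remove() is the locked wrapper that also invalidates the by-name
 * caches.
 */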
static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
		RC_CHK_ACCESS(maps)->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--RC_CHK_ACCESS(maps)->nr_maps;
	up_write(maps__lock(maps));
}

static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

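/*
 * Lifetime: maps__new() returns a reference counted handle, maps__get() takes
 * an extra reference and maps__put() drops one, deleting on the last put.
 */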
struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}

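/*
 * maps__for_each_map() iterates all maps under the read lock and stops as
 * soon as the callback returns a non-zero value. Illustrative use only
 * (count_cb is not part of this file):
 *
 *	static int count_cb(struct map *map __maybe_unused, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	maps__for_each_map(maps, count_cb, &n);
 */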
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	struct map_rb_node *pos;
	int ret = 0;

	down_read(maps__lock(maps));
	maps__for_each_entry(maps, pos) {
		ret = cb(pos->map, data);
		if (ret)
			break;
	}
	up_read(maps__lock(maps));
	return ret;
}

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map_rb_node *pos, *next;
	unsigned int start_nr_maps;

	down_write(maps__lock(maps));

	start_nr_maps = maps__nr_maps(maps);
	maps__for_each_entry_safe(maps, pos, next) {
		if (cb(pos->map, data)) {
			__maps__remove(maps, pos);
			--RC_CHK_ACCESS(maps)->nr_maps;
		}
	}
	if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
		__maps__free_maps_by_name(maps);

	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map__map_ip(map, addr));
	}

	return NULL;
}

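/*
 * Symbol search by name goes through the maps__for_each_map() callback below:
 * the walk stops (the callback returns 1) on the first map whose symbol table
 * contains the requested name.
 */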
struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

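/*
 * Resolve an addr_map_symbol: refresh ams->ms.map if the address no longer
 * falls inside it, then compute the map relative address and find the symbol.
 */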
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

392/*
393 * Find first map where end > map->start.
394 * Same as find_vma() in kernel.
395 */
396static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
397{
398 struct rb_root *root;
399 struct rb_node *next, *first;
400
401 root = maps__entries(maps);
402 next = root->rb_node;
403 first = NULL;
404 while (next) {
405 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
406
407 if (map__end(pos->map) > map__start(map)) {
408 first = next;
409 if (map__start(pos->map) <= map__start(map))
410 break;
411 next = next->rb_left;
412 } else
413 next = next->rb_right;
414 }
415 return first;
416}
417
418/*
419 * Adds new to maps, if new overlaps existing entries then the existing maps are
420 * adjusted or removed so that new fits without overlapping any entries.
421 */
422int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
423{
424
425 struct rb_node *next;
426 int err = 0;
427 FILE *fp = debug_file();
428
429 down_write(maps__lock(maps));
430
431 next = first_ending_after(maps, new);
432 while (next && !err) {
433 struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
434 next = rb_next(&pos->rb_node);
435
436 /*
437 * Stop if current map starts after map->end.
438 * Maps are ordered by start: next will not overlap for sure.
439 */
440 if (map__start(pos->map) >= map__end(new))
441 break;
442
443 if (verbose >= 2) {
444
445 if (use_browser) {
446 pr_debug("overlapping maps in %s (disable tui for more info)\n",
447 map__dso(new)->name);
448 } else {
449 pr_debug("overlapping maps:\n");
450 map__fprintf(new, fp);
451 map__fprintf(pos->map, fp);
452 }
453 }
454
455 rb_erase_init(&pos->rb_node, maps__entries(maps));
456 /*
457 * Now check if we need to create new maps for areas not
458 * overlapped by the new map:
459 */
460 if (map__start(new) > map__start(pos->map)) {
461 struct map *before = map__clone(pos->map);
462
463 if (before == NULL) {
464 err = -ENOMEM;
465 goto put_map;
466 }
467
468 map__set_end(before, map__start(new));
469 err = __maps__insert(maps, before);
470 if (err) {
471 map__put(before);
472 goto put_map;
473 }
474
475 if (verbose >= 2 && !use_browser)
476 map__fprintf(before, fp);
477 map__put(before);
478 }
479
480 if (map__end(new) < map__end(pos->map)) {
481 struct map *after = map__clone(pos->map);
482
483 if (after == NULL) {
484 err = -ENOMEM;
485 goto put_map;
486 }
487
488 map__set_start(after, map__end(new));
489 map__add_pgoff(after, map__end(new) - map__start(pos->map));
490 assert(map__map_ip(pos->map, map__end(new)) ==
491 map__map_ip(after, map__end(new)));
492 err = __maps__insert(maps, after);
493 if (err) {
494 map__put(after);
495 goto put_map;
496 }
497 if (verbose >= 2 && !use_browser)
498 map__fprintf(after, fp);
499 map__put(after);
500 }
501put_map:
502 map__put(pos->map);
503 free(pos);
504 }
505 /* Add the map. */
506 err = __maps__insert(maps, new);
507 up_write(maps__lock(maps));
508 return err;
509}
510
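/*
 * Clone every map in parent into maps, preparing unwind access for each clone
 * before inserting it.
 */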
int maps__copy_from(struct maps *maps, struct maps *parent)
{
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}

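/* Binary search of the rb-tree for the map containing ip (start <= ip < end). */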
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;

	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}

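/*
 * Comparators for the by-name array: map__strcmp() orders by dso short name,
 * with start address and pointer as tie breakers, and map__strcmp_name() is
 * the key comparison used by bsearch().
 */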
static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map **)a;
	const struct map *map_b = *(const struct map **)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso_a->short_name, dso_b->short_name);

	if (ret == 0 && map_a != map_b) {
		/*
		 * Ensure that distinct maps with equal names still get a
		 * stable order, in part to aid reference counting.
		 */
		ret = (int)map__start(map_a) - (int)map__start(map_b);
		if (ret == 0)
			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
	}

	return ret;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso->short_name);
}

void __maps__sort_by_name(struct maps *maps)
{
	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}

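/*
 * Lazily build maps_by_name from the rb-tree. Called with the read lock held:
 * it drops it and takes the write lock while filling and sorting the array,
 * then reacquires the read lock before returning.
 */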
static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}

static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
	struct map **mapp;

	if (maps__maps_by_name(maps) == NULL &&
	    map__groups__sort_by_name_from_rbtree(maps))
		return NULL;

	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__strcmp_name);
	if (mapp)
		return *mapp;
	return NULL;
}

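/*
 * Name lookup with a one entry cache: last_search_by_name remembers the map
 * found by the previous call so repeated lookups of the same dso are cheap.
 */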
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));

	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If maps->maps_by_name exists it mirrors the rbtree, so when the
	 * binary search below fails the name isn't in the rbtree either and
	 * the fallback walk can be skipped.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node = maps__find_node(maps, map);
	struct map_rb_node *next = map_rb_node__next(rb_node);

	if (next)
		return next->map;

	return NULL;
}

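/*
 * Fix up zero or overlapping end addresses by clamping each map's end to the
 * start of the following map; the last map's end, if unset, is guessed.
 */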
void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still don't have the actual symbols, so just guess the
	 * last map's final address.
	 */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}

720/*
721 * Merges map into maps by splitting the new map within the existing map
722 * regions.
723 */
724int maps__merge_in(struct maps *kmaps, struct map *new_map)
725{
726 struct map_rb_node *rb_node;
727 struct rb_node *first;
728 bool overlaps;
729 LIST_HEAD(merged);
730 int err = 0;
731
732 down_read(maps__lock(kmaps));
733 first = first_ending_after(kmaps, new_map);
734 rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
735 overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
736 up_read(maps__lock(kmaps));
737
738 if (!overlaps)
739 return maps__insert(kmaps, new_map);
740
741 maps__for_each_entry(kmaps, rb_node) {
742 struct map *old_map = rb_node->map;
743
744 /* no overload with this one */
745 if (map__end(new_map) < map__start(old_map) ||
746 map__start(new_map) >= map__end(old_map))
747 continue;
748
749 if (map__start(new_map) < map__start(old_map)) {
750 /*
751 * |new......
752 * |old....
753 */
754 if (map__end(new_map) < map__end(old_map)) {
755 /*
756 * |new......| -> |new..|
757 * |old....| -> |old....|
758 */
759 map__set_end(new_map, map__start(old_map));
760 } else {
761 /*
762 * |new.............| -> |new..| |new..|
763 * |old....| -> |old....|
764 */
765 struct map_list_node *m = map_list_node__new();
766
767 if (!m) {
768 err = -ENOMEM;
769 goto out;
770 }
771
772 m->map = map__clone(new_map);
773 if (!m->map) {
774 free(m);
775 err = -ENOMEM;
776 goto out;
777 }
778
779 map__set_end(m->map, map__start(old_map));
780 list_add_tail(&m->node, &merged);
781 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
782 map__set_start(new_map, map__end(old_map));
783 }
784 } else {
785 /*
786 * |new......
787 * |old....
788 */
789 if (map__end(new_map) < map__end(old_map)) {
790 /*
791 * |new..| -> x
792 * |old.........| -> |old.........|
793 */
794 map__put(new_map);
795 new_map = NULL;
796 break;
797 } else {
798 /*
799 * |new......| -> |new...|
800 * |old....| -> |old....|
801 */
802 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
803 map__set_start(new_map, map__end(old_map));
804 }
805 }
806 }
807
808out:
809 while (!list_empty(&merged)) {
810 struct map_list_node *old_node;
811
812 old_node = list_entry(merged.next, struct map_list_node, node);
813 list_del_init(&old_node->node);
814 if (!err)
815 err = maps__insert(kmaps, old_node->map);
816 map__put(old_node->map);
817 free(old_node);
818 }
819
820 if (new_map) {
821 if (!err)
822 err = maps__insert(kmaps, new_map);
823 map__put(new_map);
824 }
825 return err;
826}
827
void maps__load_first(struct maps *maps)
{
	struct map_rb_node *first;

	down_read(maps__lock(maps));

	first = maps__first(maps);
	if (first)
		map__load(first->map);

	up_read(maps__lock(maps));
}