v6.8 (tools/perf/util/maps.c)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

struct map_rb_node {
	struct rb_node rb_node;
	struct map *map;
};

#define maps__for_each_entry(maps, map) \
	for (map = maps__first(maps); map; map = map_rb_node__next(map))

#define maps__for_each_entry_safe(maps, map, next) \
	for (map = maps__first(maps), next = map_rb_node__next(map); map; \
	     map = next, next = map_rb_node__next(map))
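
/*
 * Illustrative usage sketch (not part of the original file): both iterators
 * walk the rbtree in ascending start-address order and assume the caller
 * holds maps__lock(), e.g.:
 *
 *	struct map_rb_node *pos;
 *
 *	down_read(maps__lock(maps));
 *	maps__for_each_entry(maps, pos)
 *		pr_debug("map starts at %#" PRIx64 "\n", map__start(pos->map));
 *	up_read(maps__lock(maps));
 *
 * The _safe variant also caches the next node up front, so the current node
 * may be erased and freed mid-walk, as __maps__purge() below does.
 */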

static struct rb_root *maps__entries(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->entries;
}

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

static struct map **maps__maps_by_name(struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}

static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
			return rb_node;
	}
	return NULL;
}

static void maps__init(struct maps *maps, struct machine *machine)
{
	refcount_set(maps__refcnt(maps), 1);
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->entries = RB_ROOT;
	RC_CHK_ACCESS(maps)->machine = machine;
	RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free everything so that the array can be rebuilt from the rbtree on
	 * the next search by name.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}

static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++RC_CHK_ACCESS(maps)->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}

	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
			RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}

static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
		RC_CHK_ACCESS(maps)->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--RC_CHK_ACCESS(maps)->nr_maps;
	up_write(maps__lock(maps));
}

static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
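
/*
 * Usage sketch (not part of the original file): maps__get()/maps__put()
 * follow perf's usual reference-counting convention, so a holder does:
 *
 *	struct maps *ref = maps__get(maps);	// +1 reference
 *	// ... use ref ...
 *	maps__put(ref);				// -1, maps__delete() at zero
 *
 * The RC_CHK_* wrappers are assumed here to reduce to plain pointer
 * operations unless reference-count checking is built in, in which case they
 * add leak-checking indirection.
 */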

int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	struct map_rb_node *pos;
	int ret = 0;

	down_read(maps__lock(maps));
	maps__for_each_entry(maps, pos) {
		ret = cb(pos->map, data);
		if (ret)
			break;
	}
	up_read(maps__lock(maps));
	return ret;
}

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map_rb_node *pos, *next;
	unsigned int start_nr_maps;

	down_write(maps__lock(maps));

	start_nr_maps = maps__nr_maps(maps);
	maps__for_each_entry_safe(maps, pos, next) {
		if (cb(pos->map, data)) {
			__maps__remove(maps, pos);
			--RC_CHK_ACCESS(maps)->nr_maps;
		}
	}
	if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
		__maps__free_maps_by_name(maps);

	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map__map_ip(map, addr));
	}

	return NULL;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
{
	struct rb_root *root;
	struct rb_node *next, *first;

	root = maps__entries(maps);
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}
	return first;
}
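
/*
 * Worked example (illustrative, not part of the original file): with entries
 * [0x1000,0x2000) and [0x3000,0x4000), a query map starting at 0x2800 skips
 * the first entry (end 0x2000 <= 0x2800) and returns the [0x3000,0x4000)
 * node: the leftmost entry whose end exceeds the query's start, whether or
 * not it actually overlaps. Callers must still compare against the query's
 * end, as maps__fixup_overlap_and_insert() below does.
 */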

/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	struct rb_node *next;
	int err = 0;
	FILE *fp = debug_file();

	down_write(maps__lock(maps));

	next = first_ending_after(maps, new);
	while (next && !err) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos->map) >= map__end(new))
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(new)->name);
			} else {
				pr_debug("overlapping maps:\n");
				map__fprintf(new, fp);
				map__fprintf(pos->map, fp);
			}
		}

		rb_erase_init(&pos->rb_node, maps__entries(maps));
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos->map)) {
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_end(before, map__start(new));
			err = __maps__insert(maps, before);
			if (err) {
				map__put(before);
				goto put_map;
			}

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(new) < map__end(pos->map)) {
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos->map));
			assert(map__map_ip(pos->map, map__end(new)) ==
				map__map_ip(after, map__end(new)));
			err = __maps__insert(maps, after);
			if (err) {
				map__put(after);
				goto put_map;
			}
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);
		free(pos);
	}
	/* Add the map. */
	err = __maps__insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}
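
/*
 * Worked example (illustrative, not part of the original file): inserting
 * new = [0x1800,0x2000) over an existing pos = [0x1000,0x3000) with pgoff 0
 * clones pos twice: before = [0x1000,0x1800) keeping pgoff 0, and
 * after = [0x2000,0x3000) with pgoff advanced by 0x1000 (map__end(new) -
 * map__start(pos->map)), so map__map_ip() yields the same file offsets for
 * the surviving ranges as it did in pos.
 */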

int maps__copy_from(struct maps *maps, struct maps *parent)
{
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;

	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map **)a;
	const struct map *map_b = *(const struct map **)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso_a->short_name, dso_b->short_name);

	if (ret == 0 && map_a != map_b) {
		/*
		 * Ensure distinct but name equal maps have an order in part to
		 * aid reference counting.
		 */
		ret = (int)map__start(map_a) - (int)map__start(map_b);
		if (ret == 0)
			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
	}

	return ret;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso->short_name);
}

void __maps__sort_by_name(struct maps *maps)
{
	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}

static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}

static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
	struct map **mapp;

	if (maps__maps_by_name(maps) == NULL &&
	    map__groups__sort_by_name_from_rbtree(maps))
		return NULL;

	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__strcmp_name);
	if (mapp)
		return *mapp;
	return NULL;
}

struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));

	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If we have maps->maps_by_name, then the name isn't in the rbtree,
	 * as maps->maps_by_name mirrors the rbtree when lookups by name are
	 * made.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node = maps__find_node(maps, map);
	struct map_rb_node *next = map_rb_node__next(rb_node);

	if (next)
		return next->map;

	return NULL;
}

void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map final address.
	 */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map_rb_node *rb_node;
	struct rb_node *first;
	bool overlaps;
	LIST_HEAD(merged);
	int err = 0;

	down_read(maps__lock(kmaps));
	first = first_ending_after(kmaps, new_map);
	rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
	overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
	up_read(maps__lock(kmaps));

	if (!overlaps)
		return maps__insert(kmaps, new_map);

	maps__for_each_entry(kmaps, rb_node) {
		struct map *old_map = rb_node->map;

		/* no overlap with this one */
		if (map__end(new_map) < map__start(old_map) ||
		    map__start(new_map) >= map__end(old_map))
			continue;

		if (map__start(new_map) < map__start(old_map)) {
			/*
			 * |new......
			 *       |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				map__set_end(new_map, map__start(old_map));
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map_list_node *m = map_list_node__new();

				if (!m) {
					err = -ENOMEM;
					goto out;
				}

				m->map = map__clone(new_map);
				if (!m->map) {
					free(m);
					err = -ENOMEM;
					goto out;
				}

				map__set_end(m->map, map__start(old_map));
				list_add_tail(&m->node, &merged);
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		}
	}

out:
	while (!list_empty(&merged)) {
		struct map_list_node *old_node;

		old_node = list_entry(merged.next, struct map_list_node, node);
		list_del_init(&old_node->node);
		if (!err)
			err = maps__insert(kmaps, old_node->map);
		map__put(old_node->map);
		free(old_node);
	}

	if (new_map) {
		if (!err)
			err = maps__insert(kmaps, new_map);
		map__put(new_map);
	}
	return err;
}

void maps__load_first(struct maps *maps)
{
	struct map_rb_node *first;

	down_read(maps__lock(maps));

	first = maps__first(maps);
	if (first)
		map__load(first->map);

	up_read(maps__lock(maps));
}
v6.13.7 (tools/perf/util/maps.c)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
#include <internal/rc_check.h>

/*
 * Locking/sorting note:
 *
 * Sorting is done with the write lock held; iteration and binary searching
 * happen under the read lock and require the maps to be sorted. There is a
 * race between sorting releasing the write lock and acquiring the read lock
 * for iteration/searching, where another thread could insert and break the
 * sorting of the maps. In practice inserting maps should be rare, meaning
 * that the race shouldn't lead to live lock. Removal of maps doesn't break
 * being sorted.
 */
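
/*
 * Sketch of the retry pattern described above (not part of the original
 * file): readers loop until they observe a sorted array, e.g.:
 *
 *	bool done = false;
 *
 *	while (!done) {
 *		down_read(maps__lock(maps));
 *		if (maps__maps_by_address_sorted(maps)) {
 *			// ... binary search or iterate ...
 *			done = true;
 *		}
 *		up_read(maps__lock(maps));
 *		if (!done)
 *			maps__sort_by_address(maps); // takes the write lock
 *	}
 *
 * maps__find() and maps__for_each_map() below implement exactly this loop.
 */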

DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map	 **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map	 **maps_by_name;
	struct machine	 *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void		*addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t	 refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int	 nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int	 nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int	 last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool		 maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool		 maps_by_name_sorted;
	/** @ends_broken: do the maps contain a map whose end value is unset or unsorted? */
	bool		 ends_broken;
};

static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps in maps_by_name should also be in
			 * maps_by_address, so the reference count should be
			 * higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}

static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;
}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;
}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}

static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Free everything so that the array can be rebuilt on the next search
	 * by name.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}

static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
				? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}

static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
		maps__nr_maps(maps),
		sizeof(struct map *),
		map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}

static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}

static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
				maps__nr_maps(maps),
				sizeof(struct map *),
				map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}

static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}

static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}

static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(new);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}

bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}

int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}
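
/*
 * Worked example (illustrative, not part of the original file): for entries
 * [0x1000,0x2000) and [0x3000,0x4000), a query map starting at 0x2800 yields
 * index 1, the leftmost entry whose end exceeds the query's start; when no
 * entry qualifies the function returns nr_maps, so callers bounds-check the
 * result, as __maps__fixup_overlap_and_insert() below does.
 */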

static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	memmove(&maps_by_address[first_after_index + to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	maps__set_maps_by_name_sorted(maps, false);
	check_invariants(maps);
	return 0;
}

/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i;

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary then an insert is needed. If the new entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace existing mapping and end knowing
				 * there aren't later overlapping or any
				 * mappings.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}

int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}

int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						dest_maps_by_name &&
						maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}

static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp =
				bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__addr_cmp);

			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}
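
/*
 * Usage note (a sketch, not part of the original file): unlike the v6.8
 * version above, this maps__find() returns a map__get() reference, so
 * callers drop it when done:
 *
 *	struct map *map = maps__find(maps, ip);
 *
 *	if (map) {
 *		// ... use map ...
 *		map__put(map);
 *	}
 *
 * maps__find_symbol() above shows the same contract: it puts the map unless
 * the caller passed mapp to take ownership of the reference.
 */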

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}

struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed, do a linear search
				 * through the address-sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address = maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	while (!maps__maps_by_address_sorted(maps)) {
		up_read(maps__lock(maps));
		maps__sort_by_address(maps);
		down_read(maps__lock(maps));
	}
	i = maps__by_address_index(maps, map);
	if (++i < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i]);

	up_read(maps__lock(maps));
	return result;
}

void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* First after binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);

		check_invariants(kmaps);
		up_write(maps__lock(kmaps));
		return ret;
	}
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;

	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
		return -ENOMEM;
	}
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	__maps__free_maps_by_name(kmaps);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);

	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);

	maps__set_nr_maps(kmaps, first_after_);

	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);

	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);

	/* Copy the maps from merged into kmaps. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);

	free(kmaps_maps_by_address);
	check_invariants(kmaps);
	up_write(maps__lock(kmaps));
	return 0;
}

void maps__load_first(struct maps *maps)
{
	down_read(maps__lock(maps));

	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);

	up_read(maps__lock(maps));
}