fs/btrfs/extent_map.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2
  3#include <linux/err.h>
  4#include <linux/slab.h>
  5#include <linux/spinlock.h>
  6#include "messages.h"
  7#include "ctree.h"
  8#include "volumes.h"
  9#include "extent_map.h"
 10#include "compression.h"
 11#include "btrfs_inode.h"
 12
 13
 14static struct kmem_cache *extent_map_cache;
 15
 16int __init extent_map_init(void)
 17{
 18	extent_map_cache = kmem_cache_create("btrfs_extent_map",
 19			sizeof(struct extent_map), 0,
 20			SLAB_MEM_SPREAD, NULL);
 21	if (!extent_map_cache)
 22		return -ENOMEM;
 23	return 0;
 24}
 25
 26void __cold extent_map_exit(void)
 27{
 28	kmem_cache_destroy(extent_map_cache);
 29}
 30
 31/*
 32 * Initialize the extent tree @tree.  Should be called for each new inode or
 33 * other user of the extent_map interface.
 34 */
 35void extent_map_tree_init(struct extent_map_tree *tree)
 36{
 37	tree->map = RB_ROOT_CACHED;
 38	INIT_LIST_HEAD(&tree->modified_extents);
 39	rwlock_init(&tree->lock);
 40}
 41
 42/*
 43 * Allocate a new extent_map structure.  The new structure is returned with a
 44 * reference count of one and needs to be freed using free_extent_map()
 45 */
 46struct extent_map *alloc_extent_map(void)
 47{
 48	struct extent_map *em;
 49	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
 50	if (!em)
 51		return NULL;
 52	RB_CLEAR_NODE(&em->rb_node);
 53	em->compress_type = BTRFS_COMPRESS_NONE;
 54	refcount_set(&em->refs, 1);
 55	INIT_LIST_HEAD(&em->list);
 56	return em;
 57}
 58
 59/*
 60 * Drop the reference out on @em by one and free the structure if the reference
 61 * count hits zero.
 62 */
 63void free_extent_map(struct extent_map *em)
 64{
 65	if (!em)
 66		return;
 67	if (refcount_dec_and_test(&em->refs)) {
 68		WARN_ON(extent_map_in_tree(em));
 69		WARN_ON(!list_empty(&em->list));
 70		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
 71			kfree(em->map_lookup);
 72		kmem_cache_free(extent_map_cache, em);
 73	}
 74}
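/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * alloc_extent_map() hands back one reference, and free_extent_map() is the
 * only correct way to drop it, so allocation and release always pair up:
 *
 *	struct extent_map *em;
 *
 *	em = alloc_extent_map();
 *	if (!em)
 *		return -ENOMEM;		// refcount == 1, owned by the caller
 *	em->start = 0;
 *	em->len = SZ_4K;		// example values only
 *	// ... hand to add_extent_mapping() or use locally ...
 *	free_extent_map(em);		// drops our reference; frees at zero
 */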
 75
 76/* Do the math around the end of an extent, handling wrapping. */
 77static u64 range_end(u64 start, u64 len)
 78{
 79	if (start + len < start)
 80		return (u64)-1;
 81	return start + len;
 82}
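/*
 * Worked example of the wrap handling above (64-bit values): range_end(0,
 * 4096) returns 4096, while range_end((u64)-1 - 10, 4096) would wrap past
 * the end of the u64 space, so it clamps to (u64)-1 instead.
 */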
 83
 84static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
 85{
 86	struct rb_node **p = &root->rb_root.rb_node;
 87	struct rb_node *parent = NULL;
 88	struct extent_map *entry = NULL;
 89	struct rb_node *orig_parent = NULL;
 90	u64 end = range_end(em->start, em->len);
 91	bool leftmost = true;
 92
 93	while (*p) {
 94		parent = *p;
 95		entry = rb_entry(parent, struct extent_map, rb_node);
 96
 97		if (em->start < entry->start) {
 98			p = &(*p)->rb_left;
 99		} else if (em->start >= extent_map_end(entry)) {
100			p = &(*p)->rb_right;
101			leftmost = false;
102		} else {
103			return -EEXIST;
104		}
105	}
106
107	orig_parent = parent;
108	while (parent && em->start >= extent_map_end(entry)) {
109		parent = rb_next(parent);
110		entry = rb_entry(parent, struct extent_map, rb_node);
111	}
112	if (parent)
113		if (end > entry->start && em->start < extent_map_end(entry))
114			return -EEXIST;
115
116	parent = orig_parent;
117	entry = rb_entry(parent, struct extent_map, rb_node);
118	while (parent && em->start < entry->start) {
119		parent = rb_prev(parent);
120		entry = rb_entry(parent, struct extent_map, rb_node);
121	}
122	if (parent)
123		if (end > entry->start && em->start < extent_map_end(entry))
124			return -EEXIST;
125
126	rb_link_node(&em->rb_node, orig_parent, p);
127	rb_insert_color_cached(&em->rb_node, root, leftmost);
128	return 0;
129}
130
131/*
132 * Search through the tree for an extent_map with a given offset.  If it can't
133 * be found, try to find some neighboring extents
134 */
135static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
136				     struct rb_node **prev_or_next_ret)
137{
138	struct rb_node *n = root->rb_node;
139	struct rb_node *prev = NULL;
140	struct rb_node *orig_prev = NULL;
141	struct extent_map *entry;
142	struct extent_map *prev_entry = NULL;
143
144	ASSERT(prev_or_next_ret);
145
146	while (n) {
147		entry = rb_entry(n, struct extent_map, rb_node);
148		prev = n;
149		prev_entry = entry;
150
151		if (offset < entry->start)
152			n = n->rb_left;
153		else if (offset >= extent_map_end(entry))
154			n = n->rb_right;
155		else
156			return n;
157	}
158
159	orig_prev = prev;
160	while (prev && offset >= extent_map_end(prev_entry)) {
161		prev = rb_next(prev);
162		prev_entry = rb_entry(prev, struct extent_map, rb_node);
163	}
164
165	/*
166	 * Previous extent map found, return as in this case the caller does not
167	 * care about the next one.
168	 */
169	if (prev) {
170		*prev_or_next_ret = prev;
171		return NULL;
172	}
173
174	prev = orig_prev;
175	prev_entry = rb_entry(prev, struct extent_map, rb_node);
176	while (prev && offset < prev_entry->start) {
177		prev = rb_prev(prev);
178		prev_entry = rb_entry(prev, struct extent_map, rb_node);
179	}
180	*prev_or_next_ret = prev;
181
182	return NULL;
183}
184
185/* Check to see if two extent_map structs are adjacent and safe to merge. */
186static int mergable_maps(struct extent_map *prev, struct extent_map *next)
187{
188	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
189		return 0;
190
191	/*
192	 * don't merge compressed extents, we need to know their
193	 * actual size
194	 */
195	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
196		return 0;
197
198	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
199	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
200		return 0;
201
202	/*
203	 * We don't want to merge stuff that hasn't been written to the log yet
204	 * since it may not reflect exactly what is on disk, and that would be
205	 * bad.
206	 */
207	if (!list_empty(&prev->list) || !list_empty(&next->list))
208		return 0;
209
210	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
211	       prev->block_start != EXTENT_MAP_DELALLOC);
212
213	if (prev->map_lookup || next->map_lookup)
214		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
215		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
216
217	if (extent_map_end(prev) == next->start &&
218	    prev->flags == next->flags &&
219	    prev->map_lookup == next->map_lookup &&
220	    ((next->block_start == EXTENT_MAP_HOLE &&
221	      prev->block_start == EXTENT_MAP_HOLE) ||
222	     (next->block_start == EXTENT_MAP_INLINE &&
223	      prev->block_start == EXTENT_MAP_INLINE) ||
224	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
225	      next->block_start == extent_map_block_end(prev)))) {
226		return 1;
227	}
228	return 0;
229}
230
231static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
232{
233	struct extent_map *merge = NULL;
234	struct rb_node *rb;
235
236	/*
237	 * We can't modify an extent map that is in the tree and that is being
238	 * used by another task, as it can cause that other task to see it in
239	 * inconsistent state during the merging. We always have 1 reference for
240	 * the tree and 1 for this task (which is unpinning the extent map or
241	 * clearing the logging flag), so anything > 2 means it's being used by
242	 * other tasks too.
243	 */
244	if (refcount_read(&em->refs) > 2)
245		return;
246
247	if (em->start != 0) {
248		rb = rb_prev(&em->rb_node);
249		if (rb)
250			merge = rb_entry(rb, struct extent_map, rb_node);
251		if (rb && mergable_maps(merge, em)) {
252			em->start = merge->start;
253			em->orig_start = merge->orig_start;
254			em->len += merge->len;
255			em->block_len += merge->block_len;
256			em->block_start = merge->block_start;
257			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
258			em->mod_start = merge->mod_start;
259			em->generation = max(em->generation, merge->generation);
260			set_bit(EXTENT_FLAG_MERGED, &em->flags);
261
262			rb_erase_cached(&merge->rb_node, &tree->map);
263			RB_CLEAR_NODE(&merge->rb_node);
264			free_extent_map(merge);
265		}
266	}
267
268	rb = rb_next(&em->rb_node);
269	if (rb)
270		merge = rb_entry(rb, struct extent_map, rb_node);
271	if (rb && mergable_maps(em, merge)) {
272		em->len += merge->len;
273		em->block_len += merge->block_len;
274		rb_erase_cached(&merge->rb_node, &tree->map);
275		RB_CLEAR_NODE(&merge->rb_node);
276		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
277		em->generation = max(em->generation, merge->generation);
278		set_bit(EXTENT_FLAG_MERGED, &em->flags);
279		free_extent_map(merge);
280	}
281}
282
283/*
284 * Unpin an extent from the cache.
285 *
286 * @tree:	tree to unpin the extent in
287 * @start:	logical offset in the file
288 * @len:	length of the extent
289 * @gen:	generation that this extent has been modified in
290 *
291 * Called after an extent has been written to disk properly.  Set the generation
292 * to the generation that actually added the file item to the inode so we know
293 * we need to sync this extent when we call fsync().
294 */
295int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
296		       u64 gen)
297{
298	int ret = 0;
299	struct extent_map *em;
300	bool prealloc = false;
301
302	write_lock(&tree->lock);
303	em = lookup_extent_mapping(tree, start, len);
304
305	WARN_ON(!em || em->start != start);
306
307	if (!em)
308		goto out;
309
310	em->generation = gen;
311	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
312	em->mod_start = em->start;
313	em->mod_len = em->len;
314
315	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
316		prealloc = true;
317		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
318	}
319
320	try_merge_map(tree, em);
321
322	if (prealloc) {
323		em->mod_start = em->start;
324		em->mod_len = em->len;
325	}
326
327	free_extent_map(em);
328out:
329	write_unlock(&tree->lock);
330	return ret;
331
332}
333
334void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
335{
336	lockdep_assert_held_write(&tree->lock);
337
338	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
339	if (extent_map_in_tree(em))
340		try_merge_map(tree, em);
341}
342
343static inline void setup_extent_mapping(struct extent_map_tree *tree,
344					struct extent_map *em,
345					int modified)
346{
347	refcount_inc(&em->refs);
348	em->mod_start = em->start;
349	em->mod_len = em->len;
350
351	if (modified)
352		list_move(&em->list, &tree->modified_extents);
353	else
354		try_merge_map(tree, em);
355}
356
357static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
358{
359	struct map_lookup *map = em->map_lookup;
360	u64 stripe_size = em->orig_block_len;
361	int i;
362
363	for (i = 0; i < map->num_stripes; i++) {
364		struct btrfs_io_stripe *stripe = &map->stripes[i];
365		struct btrfs_device *device = stripe->dev;
366
367		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
368				 stripe->physical + stripe_size - 1, bits);
369	}
370}
371
372static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
373{
374	struct map_lookup *map = em->map_lookup;
375	u64 stripe_size = em->orig_block_len;
376	int i;
377
378	for (i = 0; i < map->num_stripes; i++) {
379		struct btrfs_io_stripe *stripe = &map->stripes[i];
380		struct btrfs_device *device = stripe->dev;
381
382		__clear_extent_bit(&device->alloc_state, stripe->physical,
383				   stripe->physical + stripe_size - 1, bits,
384				   NULL, GFP_NOWAIT, NULL);
385	}
386}
387
388/*
389 * Add new extent map to the extent tree
390 *
391 * @tree:	tree to insert new map in
392 * @em:		map to insert
393 * @modified:	indicate whether the given @em should be added to the
394 *	        modified list, which indicates the extent needs to be logged
395 *
396 * Insert @em into @tree or perform a simple forward/backward merge with
397 * existing mappings.  The extent_map struct passed in will be inserted
398 * into the tree directly, with an additional reference taken, or a
399 * reference dropped if the merge attempt was successful.
400 */
401int add_extent_mapping(struct extent_map_tree *tree,
402		       struct extent_map *em, int modified)
403{
404	int ret = 0;
405
406	lockdep_assert_held_write(&tree->lock);
407
408	ret = tree_insert(&tree->map, em);
409	if (ret)
410		goto out;
411
412	setup_extent_mapping(tree, em, modified);
413	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
414		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
415		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
416	}
417out:
418	return ret;
419}
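/*
 * Caller sketch (hypothetical, mirroring how btrfs call sites use this):
 * the tree lock must be held for writing across the insert, matching the
 * lockdep_assert_held_write() above, and -EEXIST signals an overlap:
 *
 *	write_lock(&em_tree->lock);
 *	ret = add_extent_mapping(em_tree, em, 0);
 *	write_unlock(&em_tree->lock);
 *	if (ret == -EEXIST)
 *		;	// overlap; see btrfs_add_extent_mapping() below
 */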
420
421static struct extent_map *
422__lookup_extent_mapping(struct extent_map_tree *tree,
423			u64 start, u64 len, int strict)
424{
425	struct extent_map *em;
426	struct rb_node *rb_node;
427	struct rb_node *prev_or_next = NULL;
428	u64 end = range_end(start, len);
429
430	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
431	if (!rb_node) {
432		if (prev_or_next)
433			rb_node = prev_or_next;
434		else
435			return NULL;
436	}
437
438	em = rb_entry(rb_node, struct extent_map, rb_node);
439
440	if (strict && !(end > em->start && start < extent_map_end(em)))
441		return NULL;
442
443	refcount_inc(&em->refs);
444	return em;
445}
446
447/*
448 * Lookup extent_map that intersects @start + @len range.
449 *
450 * @tree:	tree to lookup in
451 * @start:	byte offset to start the search
452 * @len:	length of the lookup range
453 *
454 * Find and return the first extent_map struct in @tree that intersects the
455 * [start, len] range.  There may be additional objects in the tree that
456 * intersect, so check the object returned carefully to make sure that no
457 * additional lookups are needed.
458 */
459struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
460					 u64 start, u64 len)
461{
462	return __lookup_extent_mapping(tree, start, len, 1);
463}
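/*
 * Lookup sketch (hypothetical caller): the returned extent map carries an
 * extra reference taken under the tree lock, so the caller must drop it
 * when done:
 *
 *	read_lock(&em_tree->lock);
 *	em = lookup_extent_mapping(em_tree, start, len);
 *	read_unlock(&em_tree->lock);
 *	if (em) {
 *		// ... use em->start, em->len, em->block_start ...
 *		free_extent_map(em);	// drop the lookup reference
 *	}
 */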
464
465/*
466 * Find a nearby extent map intersecting @start + @len (not an exact search).
467 *
468 * @tree:	tree to lookup in
469 * @start:	byte offset to start the search
470 * @len:	length of the lookup range
471 *
472 * Find and return the first extent_map struct in @tree that intersects the
473 * [start, len] range.
474 *
475 * If one can't be found, any nearby extent may be returned
476 */
477struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
478					 u64 start, u64 len)
479{
480	return __lookup_extent_mapping(tree, start, len, 0);
481}
482
483/*
484 * Remove an extent_map from the extent tree.
485 *
486 * @tree:	extent tree to remove from
487 * @em:		extent map being removed
488 *
489 * Remove @em from @tree.  No reference counts are dropped, and no checks
490 * are done to see if the range is in use.
491 */
492void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
493{
494	lockdep_assert_held_write(&tree->lock);
495
496	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
497	rb_erase_cached(&em->rb_node, &tree->map);
498	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
499		list_del_init(&em->list);
500	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
501		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
502	RB_CLEAR_NODE(&em->rb_node);
503}
504
505void replace_extent_mapping(struct extent_map_tree *tree,
506			    struct extent_map *cur,
507			    struct extent_map *new,
508			    int modified)
509{
510	lockdep_assert_held_write(&tree->lock);
511
512	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
513	ASSERT(extent_map_in_tree(cur));
514	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
515		list_del_init(&cur->list);
516	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
517	RB_CLEAR_NODE(&cur->rb_node);
518
519	setup_extent_mapping(tree, new, modified);
520}
521
522static struct extent_map *next_extent_map(const struct extent_map *em)
523{
524	struct rb_node *next;
525
526	next = rb_next(&em->rb_node);
527	if (!next)
528		return NULL;
529	return container_of(next, struct extent_map, rb_node);
530}
531
532static struct extent_map *prev_extent_map(struct extent_map *em)
533{
534	struct rb_node *prev;
535
536	prev = rb_prev(&em->rb_node);
537	if (!prev)
538		return NULL;
539	return container_of(prev, struct extent_map, rb_node);
540}
541
542/*
543 * Helper for btrfs_get_extent.  Given an existing extent in the tree,
544 * the existing extent is the nearest extent to map_start,
545 * and an extent that you want to insert, deal with overlap and insert
546 * the best fitted new extent into the tree.
547 */
548static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
549					 struct extent_map *existing,
550					 struct extent_map *em,
551					 u64 map_start)
552{
553	struct extent_map *prev;
554	struct extent_map *next;
555	u64 start;
556	u64 end;
557	u64 start_diff;
558
559	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
560
561	if (existing->start > map_start) {
562		next = existing;
563		prev = prev_extent_map(next);
564	} else {
565		prev = existing;
566		next = next_extent_map(prev);
567	}
568
569	start = prev ? extent_map_end(prev) : em->start;
570	start = max_t(u64, start, em->start);
571	end = next ? next->start : extent_map_end(em);
572	end = min_t(u64, end, extent_map_end(em));
573	start_diff = start - em->start;
574	em->start = start;
575	em->len = end - start;
576	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
577	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
578		em->block_start += start_diff;
579		em->block_len = em->len;
580	}
581	return add_extent_mapping(em_tree, em, 0);
582}
583
584/*
585 * Add extent mapping into em_tree.
586 *
587 * @fs_info:  the filesystem
588 * @em_tree:  extent tree into which we want to insert the extent mapping
589 * @em_in:    extent we are inserting
590 * @start:    start of the logical range btrfs_get_extent() is requesting
591 * @len:      length of the logical range btrfs_get_extent() is requesting
592 *
593 * Note that @em_in's range may be different from [start, start+len),
594 * but they must be overlapped.
595 *
596 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
597 * the -EEXIST by either:
598 * a) Returning the existing extent in @em_in if @start is within the
599 *    existing em.
600 * b) Merge the existing extent with @em_in passed in.
601 *
602 * Return 0 on success, otherwise -EEXIST.
603 *
604 */
605int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
606			     struct extent_map_tree *em_tree,
607			     struct extent_map **em_in, u64 start, u64 len)
608{
609	int ret;
610	struct extent_map *em = *em_in;
611
612	/*
613	 * Tree-checker should have rejected any inline extent with non-zero
614	 * file offset. Here just do a sanity check.
615	 */
616	if (em->block_start == EXTENT_MAP_INLINE)
617		ASSERT(em->start == 0);
618
619	ret = add_extent_mapping(em_tree, em, 0);
620	/* it is possible that someone inserted the extent into the tree
621	 * while we had the lock dropped.  It is also possible that
622	 * an overlapping map exists in the tree
623	 */
624	if (ret == -EEXIST) {
625		struct extent_map *existing;
626
627		ret = 0;
628
629		existing = search_extent_mapping(em_tree, start, len);
630
631		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
632
633		/*
634		 * existing will always be non-NULL, since there must be
635		 * extent causing the -EEXIST.
636		 */
637		if (start >= existing->start &&
638		    start < extent_map_end(existing)) {
639			free_extent_map(em);
640			*em_in = existing;
641			ret = 0;
642		} else {
643			u64 orig_start = em->start;
644			u64 orig_len = em->len;
645
646			/*
647			 * The existing extent map is the one nearest to
648			 * the [start, start + len) range which overlaps
649			 */
650			ret = merge_extent_mapping(em_tree, existing,
651						   em, start);
652			if (ret) {
653				free_extent_map(em);
654				*em_in = NULL;
655				WARN_ONCE(ret,
656"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
657					  ret, existing->start, existing->len,
658					  orig_start, orig_len);
659			}
660			free_extent_map(existing);
661		}
662	}
663
664	ASSERT(ret == 0 || ret == -EEXIST);
665	return ret;
666}
667
668/*
669 * Drop all extent maps from a tree in the fastest possible way, rescheduling
670 * if needed. This avoids searching the tree, from the root down to the first
671 * extent map, before each deletion.
672 */
673static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
674{
675	write_lock(&tree->lock);
676	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
677		struct extent_map *em;
678		struct rb_node *node;
679
680		node = rb_first_cached(&tree->map);
681		em = rb_entry(node, struct extent_map, rb_node);
682		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
683		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
684		remove_extent_mapping(tree, em);
685		free_extent_map(em);
686		cond_resched_rwlock_write(&tree->lock);
687	}
688	write_unlock(&tree->lock);
689}
690
691/*
692 * Drop all extent maps in a given range.
693 *
694 * @inode:       The target inode.
695 * @start:       Start offset of the range.
696 * @end:         End offset of the range (inclusive value).
697 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
698 *
699 * This drops all the extent maps that intersect the given range [@start, @end].
700 * Extent maps that partially overlap the range and extend behind or beyond it,
701 * are split.
702 * The caller should have locked an appropriate file range in the inode's io
703 * tree before calling this function.
704 */
705void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
706				 bool skip_pinned)
707{
708	struct extent_map *split;
709	struct extent_map *split2;
710	struct extent_map *em;
711	struct extent_map_tree *em_tree = &inode->extent_tree;
712	u64 len = end - start + 1;
713
714	WARN_ON(end < start);
715	if (end == (u64)-1) {
716		if (start == 0 && !skip_pinned) {
717			drop_all_extent_maps_fast(em_tree);
718			return;
719		}
720		len = (u64)-1;
721	} else {
722		/* Make end offset exclusive for use in the loop below. */
723		end++;
724	}
725
726	/*
727	 * It's ok if we fail to allocate the extent maps, see the comment near
728	 * the bottom of the loop below. We only need two spare extent maps in
729	 * the worst case, where the first extent map that intersects our range
730	 * starts before the range and the last extent map that intersects our
731	 * range ends after our range (and they might be the same extent map),
732	 * because we need to split those two extent maps at the boundaries.
733	 */
734	split = alloc_extent_map();
735	split2 = alloc_extent_map();
736
737	write_lock(&em_tree->lock);
738	em = lookup_extent_mapping(em_tree, start, len);
739
740	while (em) {
741		/* extent_map_end() returns exclusive value (last byte + 1). */
742		const u64 em_end = extent_map_end(em);
743		struct extent_map *next_em = NULL;
744		u64 gen;
745		unsigned long flags;
746		bool modified;
747		bool compressed;
748
749		if (em_end < end) {
750			next_em = next_extent_map(em);
751			if (next_em) {
752				if (next_em->start < end)
753					refcount_inc(&next_em->refs);
754				else
755					next_em = NULL;
756			}
757		}
758
759		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
760			start = em_end;
761			if (end != (u64)-1)
762				len = start + len - em_end;
763			goto next;
764		}
765
766		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
767		clear_bit(EXTENT_FLAG_LOGGING, &flags);
768		modified = !list_empty(&em->list);
769
770		/*
771		 * The extent map does not cross our target range, so no need to
772		 * split it, we can remove it directly.
773		 */
774		if (em->start >= start && em_end <= end)
775			goto remove_em;
776
777		flags = em->flags;
778		gen = em->generation;
779		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
780
781		if (em->start < start) {
782			if (!split) {
783				split = split2;
784				split2 = NULL;
785				if (!split)
786					goto remove_em;
787			}
788			split->start = em->start;
789			split->len = start - em->start;
790
791			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
792				split->orig_start = em->orig_start;
793				split->block_start = em->block_start;
794
795				if (compressed)
796					split->block_len = em->block_len;
797				else
798					split->block_len = split->len;
799				split->orig_block_len = max(split->block_len,
800						em->orig_block_len);
801				split->ram_bytes = em->ram_bytes;
802			} else {
803				split->orig_start = split->start;
804				split->block_len = 0;
805				split->block_start = em->block_start;
806				split->orig_block_len = 0;
807				split->ram_bytes = split->len;
808			}
809
810			split->generation = gen;
811			split->flags = flags;
812			split->compress_type = em->compress_type;
813			replace_extent_mapping(em_tree, em, split, modified);
814			free_extent_map(split);
815			split = split2;
816			split2 = NULL;
817		}
818		if (em_end > end) {
819			if (!split) {
820				split = split2;
821				split2 = NULL;
822				if (!split)
823					goto remove_em;
824			}
825			split->start = start + len;
826			split->len = em_end - (start + len);
827			split->block_start = em->block_start;
828			split->flags = flags;
829			split->compress_type = em->compress_type;
830			split->generation = gen;
831
832			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
833				split->orig_block_len = max(em->block_len,
834						    em->orig_block_len);
835
836				split->ram_bytes = em->ram_bytes;
837				if (compressed) {
838					split->block_len = em->block_len;
839					split->orig_start = em->orig_start;
840				} else {
841					const u64 diff = start + len - em->start;
842
843					split->block_len = split->len;
844					split->block_start += diff;
845					split->orig_start = em->orig_start;
846				}
847			} else {
848				split->ram_bytes = split->len;
849				split->orig_start = split->start;
850				split->block_len = 0;
851				split->orig_block_len = 0;
852			}
853
854			if (extent_map_in_tree(em)) {
855				replace_extent_mapping(em_tree, em, split,
856						       modified);
857			} else {
858				int ret;
859
860				ret = add_extent_mapping(em_tree, split,
861							 modified);
862				/* Logic error, shouldn't happen. */
863				ASSERT(ret == 0);
864				if (WARN_ON(ret != 0) && modified)
865					btrfs_set_inode_full_sync(inode);
866			}
867			free_extent_map(split);
868			split = NULL;
869		}
870remove_em:
871		if (extent_map_in_tree(em)) {
872			/*
873			 * If the extent map is still in the tree it means that
874			 * either of the following is true:
875			 *
876			 * 1) It fits entirely in our range (doesn't end beyond
877			 *    it or starts before it);
878			 *
879			 * 2) It starts before our range and/or ends after our
880			 *    range, and we were not able to allocate the extent
881			 *    maps for split operations, @split and @split2.
882			 *
883			 * If we are at case 2) then we just remove the entire
884			 * extent map - this is fine since if anyone needs it to
885			 * access the subranges outside our range, will just
886			 * load it again from the subvolume tree's file extent
887			 * item. However if the extent map was in the list of
888			 * modified extents, then we must mark the inode for a
889			 * full fsync, otherwise a fast fsync will miss this
890			 * extent if it's new and needs to be logged.
891			 */
892			if ((em->start < start || em_end > end) && modified) {
893				ASSERT(!split);
894				btrfs_set_inode_full_sync(inode);
895			}
896			remove_extent_mapping(em_tree, em);
897		}
898
899		/*
900		 * Once for the tree reference (we replaced or removed the
901		 * extent map from the tree).
902		 */
903		free_extent_map(em);
904next:
905		/* Once for us (for our lookup reference). */
906		free_extent_map(em);
907
908		em = next_em;
909	}
910
911	write_unlock(&em_tree->lock);
912
913	free_extent_map(split);
914	free_extent_map(split2);
915}
916
917/*
918 * Replace a range in the inode's extent map tree with a new extent map.
919 *
920 * @inode:      The target inode.
921 * @new_em:     The new extent map to add to the inode's extent map tree.
922 * @modified:   Indicate if the new extent map should be added to the list of
923 *              modified extents (for fast fsync tracking).
924 *
925 * Drops all the extent maps in the inode's extent map tree that intersect the
926 * range of the new extent map and adds the new extent map to the tree.
927 * The caller should have locked an appropriate file range in the inode's io
928 * tree before calling this function.
929 */
930int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
931				   struct extent_map *new_em,
932				   bool modified)
933{
934	const u64 end = new_em->start + new_em->len - 1;
935	struct extent_map_tree *tree = &inode->extent_tree;
936	int ret;
937
938	ASSERT(!extent_map_in_tree(new_em));
939
940	/*
941	 * The caller has locked an appropriate file range in the inode's io
942	 * tree, but getting -EEXIST when adding the new extent map can still
943	 * happen in case there are extents that partially cover the range, and
944	 * this is due to two tasks operating on different parts of the extent.
945	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
946	 * btrfs_get_extent") for an example and details.
947	 */
948	do {
949		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
950		write_lock(&tree->lock);
951		ret = add_extent_mapping(tree, new_em, modified);
952		write_unlock(&tree->lock);
953	} while (ret == -EEXIST);
954
955	return ret;
956}
fs/btrfs/extent_map.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/err.h>
   4#include <linux/slab.h>
   5#include <linux/spinlock.h>
   6#include "messages.h"
   7#include "ctree.h"
   8#include "extent_map.h"
   9#include "compression.h"
  10#include "btrfs_inode.h"
  11
  12
  13static struct kmem_cache *extent_map_cache;
  14
  15int __init extent_map_init(void)
  16{
  17	extent_map_cache = kmem_cache_create("btrfs_extent_map",
  18					     sizeof(struct extent_map), 0, 0, NULL);
  19	if (!extent_map_cache)
  20		return -ENOMEM;
  21	return 0;
  22}
  23
  24void __cold extent_map_exit(void)
  25{
  26	kmem_cache_destroy(extent_map_cache);
  27}
  28
  29/*
  30 * Initialize the extent tree @tree.  Should be called for each new inode or
  31 * other user of the extent_map interface.
  32 */
  33void extent_map_tree_init(struct extent_map_tree *tree)
  34{
  35	tree->map = RB_ROOT_CACHED;
  36	INIT_LIST_HEAD(&tree->modified_extents);
  37	rwlock_init(&tree->lock);
  38}
  39
  40/*
  41 * Allocate a new extent_map structure.  The new structure is returned with a
  42 * reference count of one and needs to be freed using free_extent_map()
  43 */
  44struct extent_map *alloc_extent_map(void)
  45{
  46	struct extent_map *em;
  47	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
  48	if (!em)
  49		return NULL;
  50	RB_CLEAR_NODE(&em->rb_node);
  51	refcount_set(&em->refs, 1);
  52	INIT_LIST_HEAD(&em->list);
  53	return em;
  54}
  55
  56/*
  57 * Drop the reference out on @em by one and free the structure if the reference
  58 * count hits zero.
  59 */
  60void free_extent_map(struct extent_map *em)
  61{
  62	if (!em)
  63		return;
  64	if (refcount_dec_and_test(&em->refs)) {
  65		WARN_ON(extent_map_in_tree(em));
  66		WARN_ON(!list_empty(&em->list));
  67		kmem_cache_free(extent_map_cache, em);
  68	}
  69}
  70
  71/* Do the math around the end of an extent, handling wrapping. */
  72static u64 range_end(u64 start, u64 len)
  73{
  74	if (start + len < start)
  75		return (u64)-1;
  76	return start + len;
  77}
  78
  79static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
  80{
  81	struct rb_node **p = &root->rb_root.rb_node;
  82	struct rb_node *parent = NULL;
  83	struct extent_map *entry = NULL;
  84	struct rb_node *orig_parent = NULL;
  85	u64 end = range_end(em->start, em->len);
  86	bool leftmost = true;
  87
  88	while (*p) {
  89		parent = *p;
  90		entry = rb_entry(parent, struct extent_map, rb_node);
  91
  92		if (em->start < entry->start) {
  93			p = &(*p)->rb_left;
  94		} else if (em->start >= extent_map_end(entry)) {
  95			p = &(*p)->rb_right;
  96			leftmost = false;
  97		} else {
  98			return -EEXIST;
  99		}
 100	}
 101
 102	orig_parent = parent;
 103	while (parent && em->start >= extent_map_end(entry)) {
 104		parent = rb_next(parent);
 105		entry = rb_entry(parent, struct extent_map, rb_node);
 106	}
 107	if (parent)
 108		if (end > entry->start && em->start < extent_map_end(entry))
 109			return -EEXIST;
 110
 111	parent = orig_parent;
 112	entry = rb_entry(parent, struct extent_map, rb_node);
 113	while (parent && em->start < entry->start) {
 114		parent = rb_prev(parent);
 115		entry = rb_entry(parent, struct extent_map, rb_node);
 116	}
 117	if (parent)
 118		if (end > entry->start && em->start < extent_map_end(entry))
 119			return -EEXIST;
 120
 121	rb_link_node(&em->rb_node, orig_parent, p);
 122	rb_insert_color_cached(&em->rb_node, root, leftmost);
 123	return 0;
 124}
 125
 126/*
 127 * Search through the tree for an extent_map with a given offset.  If it can't
 128 * be found, try to find some neighboring extents
 129 */
 130static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
 131				     struct rb_node **prev_or_next_ret)
 132{
 133	struct rb_node *n = root->rb_node;
 134	struct rb_node *prev = NULL;
 135	struct rb_node *orig_prev = NULL;
 136	struct extent_map *entry;
 137	struct extent_map *prev_entry = NULL;
 138
 139	ASSERT(prev_or_next_ret);
 140
 141	while (n) {
 142		entry = rb_entry(n, struct extent_map, rb_node);
 143		prev = n;
 144		prev_entry = entry;
 145
 146		if (offset < entry->start)
 147			n = n->rb_left;
 148		else if (offset >= extent_map_end(entry))
 149			n = n->rb_right;
 150		else
 151			return n;
 152	}
 153
 154	orig_prev = prev;
 155	while (prev && offset >= extent_map_end(prev_entry)) {
 156		prev = rb_next(prev);
 157		prev_entry = rb_entry(prev, struct extent_map, rb_node);
 158	}
 159
 160	/*
 161	 * Previous extent map found, return as in this case the caller does not
 162	 * care about the next one.
 163	 */
 164	if (prev) {
 165		*prev_or_next_ret = prev;
 166		return NULL;
 167	}
 168
 169	prev = orig_prev;
 170	prev_entry = rb_entry(prev, struct extent_map, rb_node);
 171	while (prev && offset < prev_entry->start) {
 172		prev = rb_prev(prev);
 173		prev_entry = rb_entry(prev, struct extent_map, rb_node);
 174	}
 175	*prev_or_next_ret = prev;
 176
 177	return NULL;
 178}
 179
 180static inline u64 extent_map_block_end(const struct extent_map *em)
 181{
 182	if (em->block_start + em->block_len < em->block_start)
 183		return (u64)-1;
 184	return em->block_start + em->block_len;
 185}
 186
 187static bool can_merge_extent_map(const struct extent_map *em)
 188{
 189	if (em->flags & EXTENT_FLAG_PINNED)
 190		return false;
 191
 192	/* Don't merge compressed extents, we need to know their actual size. */
 193	if (extent_map_is_compressed(em))
 194		return false;
 195
 196	if (em->flags & EXTENT_FLAG_LOGGING)
 197		return false;
 198
 199	/*
 200	 * We don't want to merge stuff that hasn't been written to the log yet
 201	 * since it may not reflect exactly what is on disk, and that would be
 202	 * bad.
 203	 */
 204	if (!list_empty(&em->list))
 205		return false;
 206
 207	return true;
 208}
 209
 210/* Check to see if two extent_map structs are adjacent and safe to merge. */
 211static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
 212{
 213	if (extent_map_end(prev) != next->start)
 214		return false;
 215
 216	if (prev->flags != next->flags)
 217		return false;
 218
 219	if (next->block_start < EXTENT_MAP_LAST_BYTE - 1)
 220		return next->block_start == extent_map_block_end(prev);
 221
 222	/* HOLES and INLINE extents. */
 223	return next->block_start == prev->block_start;
 224}
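/*
 * Worked example of the adjacency test (assumed values): with
 * prev = [0, 4K) at block_start 1M and next = [4K, 8K) at block_start
 * 1M + 4K, extent_map_end(prev) == next->start and
 * extent_map_block_end(prev) == next->block_start, so the pair merges into
 * [0, 8K) at 1M, provided both maps also pass can_merge_extent_map().
 */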
 225
 226static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
 227{
 228	struct extent_map *merge = NULL;
 229	struct rb_node *rb;
 230
 231	/*
 232	 * We can't modify an extent map that is in the tree and that is being
 233	 * used by another task, as it can cause that other task to see it in
 234	 * inconsistent state during the merging. We always have 1 reference for
 235	 * the tree and 1 for this task (which is unpinning the extent map or
 236	 * clearing the logging flag), so anything > 2 means it's being used by
 237	 * other tasks too.
 238	 */
 239	if (refcount_read(&em->refs) > 2)
 240		return;
 241
 242	if (!can_merge_extent_map(em))
 243		return;
 244
 245	if (em->start != 0) {
 246		rb = rb_prev(&em->rb_node);
 247		if (rb)
 248			merge = rb_entry(rb, struct extent_map, rb_node);
 249		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
 250			em->start = merge->start;
 251			em->orig_start = merge->orig_start;
 252			em->len += merge->len;
 253			em->block_len += merge->block_len;
 254			em->block_start = merge->block_start;
 255			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
 256			em->mod_start = merge->mod_start;
 257			em->generation = max(em->generation, merge->generation);
 258			em->flags |= EXTENT_FLAG_MERGED;
 259
 260			rb_erase_cached(&merge->rb_node, &tree->map);
 261			RB_CLEAR_NODE(&merge->rb_node);
 262			free_extent_map(merge);
 263		}
 264	}
 265
 266	rb = rb_next(&em->rb_node);
 267	if (rb)
 268		merge = rb_entry(rb, struct extent_map, rb_node);
 269	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
 270		em->len += merge->len;
 271		em->block_len += merge->block_len;
 272		rb_erase_cached(&merge->rb_node, &tree->map);
 273		RB_CLEAR_NODE(&merge->rb_node);
 274		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 275		em->generation = max(em->generation, merge->generation);
 276		em->flags |= EXTENT_FLAG_MERGED;
 277		free_extent_map(merge);
 278	}
 279}
 280
 281/*
 282 * Unpin an extent from the cache.
 283 *
 284 * @inode:	the inode from which we are unpinning an extent range
 285 * @start:	logical offset in the file
 286 * @len:	length of the extent
 287 * @gen:	generation that this extent has been modified in
 288 *
 289 * Called after an extent has been written to disk properly.  Set the generation
 290 * to the generation that actually added the file item to the inode so we know
 291 * we need to sync this extent when we call fsync().
 292 *
 293 * Returns: 0	     on success
 294 * 	    -ENOENT  when the extent is not found in the tree
 295 * 	    -EUCLEAN if the found extent does not match the expected start
 296 */
 297int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
 298{
 299	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 300	struct extent_map_tree *tree = &inode->extent_tree;
 301	int ret = 0;
 302	struct extent_map *em;
 303	bool prealloc = false;
 304
 305	write_lock(&tree->lock);
 306	em = lookup_extent_mapping(tree, start, len);
 307
 308	if (WARN_ON(!em)) {
 309		btrfs_warn(fs_info,
 310"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
 311			   btrfs_ino(inode), btrfs_root_id(inode->root),
 312			   start, start + len, gen);
 313		ret = -ENOENT;
 314		goto out;
 315	}
 316
 317	if (WARN_ON(em->start != start)) {
 318		btrfs_warn(fs_info,
 319"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
 320			   btrfs_ino(inode), btrfs_root_id(inode->root),
 321			   em->start, start, start + len, gen);
 322		ret = -EUCLEAN;
 323		goto out;
 324	}
 325
 326	em->generation = gen;
 327	em->flags &= ~EXTENT_FLAG_PINNED;
 328	em->mod_start = em->start;
 329	em->mod_len = em->len;
 330
 331	if (em->flags & EXTENT_FLAG_FILLING) {
 332		prealloc = true;
 333		em->flags &= ~EXTENT_FLAG_FILLING;
 334	}
 335
 336	try_merge_map(tree, em);
 337
 338	if (prealloc) {
 339		em->mod_start = em->start;
 340		em->mod_len = em->len;
 341	}
 342
 343out:
 344	write_unlock(&tree->lock);
 345	free_extent_map(em);
 346	return ret;
 347
 348}
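/*
 * Caller sketch (hypothetical, modeled on the ordered extent completion
 * path): unlike the v6.2 variant above, this version reports a missing or
 * mismatched extent map instead of only warning:
 *
 *	ret = unpin_extent_cache(inode, ordered->file_offset,
 *				 ordered->num_bytes, trans->transid);
 *	if (ret)	// -ENOENT or -EUCLEAN
 *		;	// treat as corruption in the surrounding operation
 */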
 349
 350void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
 351{
 352	lockdep_assert_held_write(&tree->lock);
 353
 354	em->flags &= ~EXTENT_FLAG_LOGGING;
 355	if (extent_map_in_tree(em))
 356		try_merge_map(tree, em);
 357}
 358
 359static inline void setup_extent_mapping(struct extent_map_tree *tree,
 360					struct extent_map *em,
 361					int modified)
 362{
 363	refcount_inc(&em->refs);
 364	em->mod_start = em->start;
 365	em->mod_len = em->len;
 366
 367	ASSERT(list_empty(&em->list));
 368
 369	if (modified)
 370		list_add(&em->list, &tree->modified_extents);
 371	else
 372		try_merge_map(tree, em);
 373}
 374
 375/*
 376 * Add new extent map to the extent tree
 377 *
 378 * @tree:	tree to insert new map in
 379 * @em:		map to insert
 380 * @modified:	indicate whether the given @em should be added to the
 381 *	        modified list, which indicates the extent needs to be logged
 382 *
 383 * Insert @em into @tree or perform a simple forward/backward merge with
 384 * existing mappings.  The extent_map struct passed in will be inserted
 385 * into the tree directly, with an additional reference taken, or a
 386 * reference dropped if the merge attempt was successful.
 387 */
 388static int add_extent_mapping(struct extent_map_tree *tree,
 389			      struct extent_map *em, int modified)
 390{
 391	int ret = 0;
 392
 393	lockdep_assert_held_write(&tree->lock);
 394
 395	ret = tree_insert(&tree->map, em);
 396	if (ret)
 397		goto out;
 398
 399	setup_extent_mapping(tree, em, modified);
 400out:
 401	return ret;
 402}
 403
 404static struct extent_map *
 405__lookup_extent_mapping(struct extent_map_tree *tree,
 406			u64 start, u64 len, int strict)
 407{
 408	struct extent_map *em;
 409	struct rb_node *rb_node;
 410	struct rb_node *prev_or_next = NULL;
 411	u64 end = range_end(start, len);
 412
 413	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
 414	if (!rb_node) {
 415		if (prev_or_next)
 416			rb_node = prev_or_next;
 417		else
 418			return NULL;
 419	}
 420
 421	em = rb_entry(rb_node, struct extent_map, rb_node);
 422
 423	if (strict && !(end > em->start && start < extent_map_end(em)))
 424		return NULL;
 425
 426	refcount_inc(&em->refs);
 427	return em;
 428}
 429
 430/*
 431 * Lookup extent_map that intersects @start + @len range.
 432 *
 433 * @tree:	tree to lookup in
 434 * @start:	byte offset to start the search
 435 * @len:	length of the lookup range
 436 *
 437 * Find and return the first extent_map struct in @tree that intersects the
 438 * [start, len] range.  There may be additional objects in the tree that
 439 * intersect, so check the object returned carefully to make sure that no
 440 * additional lookups are needed.
 441 */
 442struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 443					 u64 start, u64 len)
 444{
 445	return __lookup_extent_mapping(tree, start, len, 1);
 446}
 447
 448/*
 449 * Find a nearby extent map intersecting @start + @len (not an exact search).
 450 *
 451 * @tree:	tree to lookup in
 452 * @start:	byte offset to start the search
 453 * @len:	length of the lookup range
 454 *
 455 * Find and return the first extent_map struct in @tree that intersects the
 456 * [start, len] range.
 457 *
 458 * If one can't be found, any nearby extent may be returned
 459 */
 460struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
 461					 u64 start, u64 len)
 462{
 463	return __lookup_extent_mapping(tree, start, len, 0);
 464}
 465
 466/*
 467 * Remove an extent_map from the extent tree.
 468 *
 469 * @tree:	extent tree to remove from
 470 * @em:		extent map being removed
 471 *
 472 * Remove @em from @tree.  No reference counts are dropped, and no checks
 473 * are done to see if the range is in use.
 474 */
 475void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 476{
 477	lockdep_assert_held_write(&tree->lock);
 478
 479	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
 480	rb_erase_cached(&em->rb_node, &tree->map);
 481	if (!(em->flags & EXTENT_FLAG_LOGGING))
 482		list_del_init(&em->list);
 483	RB_CLEAR_NODE(&em->rb_node);
 484}
 485
 486static void replace_extent_mapping(struct extent_map_tree *tree,
 487				   struct extent_map *cur,
 488				   struct extent_map *new,
 489				   int modified)
 490{
 491	lockdep_assert_held_write(&tree->lock);
 492
 493	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
 494	ASSERT(extent_map_in_tree(cur));
 495	if (!(cur->flags & EXTENT_FLAG_LOGGING))
 496		list_del_init(&cur->list);
 497	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
 498	RB_CLEAR_NODE(&cur->rb_node);
 499
 500	setup_extent_mapping(tree, new, modified);
 501}
 502
 503static struct extent_map *next_extent_map(const struct extent_map *em)
 504{
 505	struct rb_node *next;
 506
 507	next = rb_next(&em->rb_node);
 508	if (!next)
 509		return NULL;
 510	return container_of(next, struct extent_map, rb_node);
 511}
 512
 513static struct extent_map *prev_extent_map(struct extent_map *em)
 514{
 515	struct rb_node *prev;
 516
 517	prev = rb_prev(&em->rb_node);
 518	if (!prev)
 519		return NULL;
 520	return container_of(prev, struct extent_map, rb_node);
 521}
 522
 523/*
 524 * Helper for btrfs_get_extent.  Given an existing extent in the tree,
 525 * the existing extent is the nearest extent to map_start,
 526 * and an extent that you want to insert, deal with overlap and insert
 527 * the best fitted new extent into the tree.
 528 */
 529static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
 530					 struct extent_map *existing,
 531					 struct extent_map *em,
 532					 u64 map_start)
 533{
 534	struct extent_map *prev;
 535	struct extent_map *next;
 536	u64 start;
 537	u64 end;
 538	u64 start_diff;
 539
 540	if (map_start < em->start || map_start >= extent_map_end(em))
 541		return -EINVAL;
 542
 543	if (existing->start > map_start) {
 544		next = existing;
 545		prev = prev_extent_map(next);
 546	} else {
 547		prev = existing;
 548		next = next_extent_map(prev);
 549	}
 550
 551	start = prev ? extent_map_end(prev) : em->start;
 552	start = max_t(u64, start, em->start);
 553	end = next ? next->start : extent_map_end(em);
 554	end = min_t(u64, end, extent_map_end(em));
 555	start_diff = start - em->start;
 556	em->start = start;
 557	em->len = end - start;
 558	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
 559	    !extent_map_is_compressed(em)) {
 560		em->block_start += start_diff;
 561		em->block_len = em->len;
 562	}
 563	return add_extent_mapping(em_tree, em, 0);
 564}
 565
 566/*
 567 * Add extent mapping into em_tree.
 568 *
 569 * @fs_info:  the filesystem
 570 * @em_tree:  extent tree into which we want to insert the extent mapping
 571 * @em_in:    extent we are inserting
 572 * @start:    start of the logical range btrfs_get_extent() is requesting
 573 * @len:      length of the logical range btrfs_get_extent() is requesting
 574 *
 575 * Note that @em_in's range may be different from [start, start+len),
 576 * but they must be overlapped.
 577 *
 578 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 579 * the -EEXIST by either:
 580 * a) Returning the existing extent in @em_in if @start is within the
 581 *    existing em.
 582 * b) Merge the existing extent with @em_in passed in.
 583 *
 584 * Return 0 on success, otherwise -EEXIST.
 585 *
 586 */
 587int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
 588			     struct extent_map_tree *em_tree,
 589			     struct extent_map **em_in, u64 start, u64 len)
 590{
 591	int ret;
 592	struct extent_map *em = *em_in;
 593
 594	/*
 595	 * Tree-checker should have rejected any inline extent with non-zero
 596	 * file offset. Here just do a sanity check.
 597	 */
 598	if (em->block_start == EXTENT_MAP_INLINE)
 599		ASSERT(em->start == 0);
 600
 601	ret = add_extent_mapping(em_tree, em, 0);
 602	/* it is possible that someone inserted the extent into the tree
 603	 * while we had the lock dropped.  It is also possible that
 604	 * an overlapping map exists in the tree
 605	 */
 606	if (ret == -EEXIST) {
 607		struct extent_map *existing;
 608
 609		existing = search_extent_mapping(em_tree, start, len);
 610
 611		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
 612
 613		/*
 614		 * existing will always be non-NULL, since there must be
 615		 * extent causing the -EEXIST.
 616		 */
 617		if (start >= existing->start &&
 618		    start < extent_map_end(existing)) {
 619			free_extent_map(em);
 620			*em_in = existing;
 621			ret = 0;
 622		} else {
 623			u64 orig_start = em->start;
 624			u64 orig_len = em->len;
 625
 626			/*
 627			 * The existing extent map is the one nearest to
 628			 * the [start, start + len) range which overlaps
 629			 */
 630			ret = merge_extent_mapping(em_tree, existing,
 631						   em, start);
 632			if (WARN_ON(ret)) {
 633				free_extent_map(em);
 634				*em_in = NULL;
 635				btrfs_warn(fs_info,
 636"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
 637					   existing->start, extent_map_end(existing),
 638					   orig_start, orig_start + orig_len, start);
 639			}
 640			free_extent_map(existing);
 641		}
 642	}
 643
 644	ASSERT(ret == 0 || ret == -EEXIST);
 645	return ret;
 646}
 647
 648/*
 649 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 650 * if needed. This avoids searching the tree, from the root down to the first
 651 * extent map, before each deletion.
 652 */
 653static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
 654{
 655	write_lock(&tree->lock);
 656	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
 657		struct extent_map *em;
 658		struct rb_node *node;
 659
 660		node = rb_first_cached(&tree->map);
 661		em = rb_entry(node, struct extent_map, rb_node);
 662		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
 663		remove_extent_mapping(tree, em);
 664		free_extent_map(em);
 665		cond_resched_rwlock_write(&tree->lock);
 666	}
 667	write_unlock(&tree->lock);
 668}
 669
 670/*
 671 * Drop all extent maps in a given range.
 672 *
 673 * @inode:       The target inode.
 674 * @start:       Start offset of the range.
 675 * @end:         End offset of the range (inclusive value).
 676 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 677 *
 678 * This drops all the extent maps that intersect the given range [@start, @end].
 679 * Extent maps that partially overlap the range and extend behind or beyond it,
 680 * are split.
 681 * The caller should have locked an appropriate file range in the inode's io
 682 * tree before calling this function.
 683 */
 684void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
 685				 bool skip_pinned)
 686{
 687	struct extent_map *split;
 688	struct extent_map *split2;
 689	struct extent_map *em;
 690	struct extent_map_tree *em_tree = &inode->extent_tree;
 691	u64 len = end - start + 1;
 692
 693	WARN_ON(end < start);
 694	if (end == (u64)-1) {
 695		if (start == 0 && !skip_pinned) {
 696			drop_all_extent_maps_fast(em_tree);
 697			return;
 698		}
 699		len = (u64)-1;
 700	} else {
 701		/* Make end offset exclusive for use in the loop below. */
 702		end++;
 703	}
 704
 705	/*
 706	 * It's ok if we fail to allocate the extent maps, see the comment near
 707	 * the bottom of the loop below. We only need two spare extent maps in
 708	 * the worst case, where the first extent map that intersects our range
 709	 * starts before the range and the last extent map that intersects our
 710	 * range ends after our range (and they might be the same extent map),
 711	 * because we need to split those two extent maps at the boundaries.
 712	 */
 713	split = alloc_extent_map();
 714	split2 = alloc_extent_map();
 715
 716	write_lock(&em_tree->lock);
 717	em = lookup_extent_mapping(em_tree, start, len);
 718
 719	while (em) {
 720		/* extent_map_end() returns exclusive value (last byte + 1). */
 721		const u64 em_end = extent_map_end(em);
 722		struct extent_map *next_em = NULL;
 723		u64 gen;
 724		unsigned long flags;
 725		bool modified;
 726		bool compressed;
 727
 728		if (em_end < end) {
 729			next_em = next_extent_map(em);
 730			if (next_em) {
 731				if (next_em->start < end)
 732					refcount_inc(&next_em->refs);
 733				else
 734					next_em = NULL;
 735			}
 736		}
 737
 738		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
 739			start = em_end;
 740			goto next;
 741		}
 742
 743		flags = em->flags;
 744		/*
 745		 * In case we split the extent map, we want to preserve the
 746		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
 747		 * it on the new extent maps.
 748		 */
 749		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
 750		modified = !list_empty(&em->list);
 751
 752		/*
 753		 * The extent map does not cross our target range, so no need to
 754		 * split it, we can remove it directly.
 755		 */
 756		if (em->start >= start && em_end <= end)
 757			goto remove_em;
 758
 759		gen = em->generation;
 760		compressed = extent_map_is_compressed(em);
 761
 762		if (em->start < start) {
 763			if (!split) {
 764				split = split2;
 765				split2 = NULL;
 766				if (!split)
 767					goto remove_em;
 768			}
 769			split->start = em->start;
 770			split->len = start - em->start;
 771
 772			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 773				split->orig_start = em->orig_start;
 774				split->block_start = em->block_start;
 775
 776				if (compressed)
 777					split->block_len = em->block_len;
 778				else
 779					split->block_len = split->len;
 780				split->orig_block_len = max(split->block_len,
 781						em->orig_block_len);
 782				split->ram_bytes = em->ram_bytes;
 783			} else {
 784				split->orig_start = split->start;
 785				split->block_len = 0;
 786				split->block_start = em->block_start;
 787				split->orig_block_len = 0;
 788				split->ram_bytes = split->len;
 789			}
 790
 791			split->generation = gen;
 792			split->flags = flags;
 793			replace_extent_mapping(em_tree, em, split, modified);
 794			free_extent_map(split);
 795			split = split2;
 796			split2 = NULL;
 797		}
 798		if (em_end > end) {
 799			if (!split) {
 800				split = split2;
 801				split2 = NULL;
 802				if (!split)
 803					goto remove_em;
 804			}
 805			split->start = end;
 806			split->len = em_end - end;
 807			split->block_start = em->block_start;
 808			split->flags = flags;
 809			split->generation = gen;
 810
 811			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
 812				split->orig_block_len = max(em->block_len,
 813						    em->orig_block_len);
 814
 815				split->ram_bytes = em->ram_bytes;
 816				if (compressed) {
 817					split->block_len = em->block_len;
 818					split->orig_start = em->orig_start;
 819				} else {
 820					const u64 diff = end - em->start;
 821
 822					split->block_len = split->len;
 823					split->block_start += diff;
 824					split->orig_start = em->orig_start;
 825				}
 826			} else {
 827				split->ram_bytes = split->len;
 828				split->orig_start = split->start;
 829				split->block_len = 0;
 830				split->orig_block_len = 0;
 831			}
 832
 833			if (extent_map_in_tree(em)) {
 834				replace_extent_mapping(em_tree, em, split,
 835						       modified);
 836			} else {
 837				int ret;
 838
 839				ret = add_extent_mapping(em_tree, split,
 840							 modified);
 841				/* Logic error, shouldn't happen. */
 842				ASSERT(ret == 0);
 843				if (WARN_ON(ret != 0) && modified)
 844					btrfs_set_inode_full_sync(inode);
 845			}
 846			free_extent_map(split);
 847			split = NULL;
 848		}
 849remove_em:
 850		if (extent_map_in_tree(em)) {
 851			/*
 852			 * If the extent map is still in the tree it means that
 853			 * either of the following is true:
 854			 *
 855			 * 1) It fits entirely in our range (doesn't end beyond
 856			 *    it or starts before it);
 857			 *
 858			 * 2) It starts before our range and/or ends after our
 859			 *    range, and we were not able to allocate the extent
 860			 *    maps for split operations, @split and @split2.
 861			 *
 862			 * If we are at case 2) then we just remove the entire
 863			 * extent map - this is fine since if anyone needs it to
 864			 * access the subranges outside our range, will just
 865			 * load it again from the subvolume tree's file extent
 866			 * item. However if the extent map was in the list of
 867			 * modified extents, then we must mark the inode for a
 868			 * full fsync, otherwise a fast fsync will miss this
 869			 * extent if it's new and needs to be logged.
 870			 */
 871			if ((em->start < start || em_end > end) && modified) {
 872				ASSERT(!split);
 873				btrfs_set_inode_full_sync(inode);
 874			}
 875			remove_extent_mapping(em_tree, em);
 876		}
 877
 878		/*
 879		 * Once for the tree reference (we replaced or removed the
 880		 * extent map from the tree).
 881		 */
 882		free_extent_map(em);
 883next:
 884		/* Once for us (for our lookup reference). */
 885		free_extent_map(em);
 886
 887		em = next_em;
 888	}
 889
 890	write_unlock(&em_tree->lock);
 891
 892	free_extent_map(split);
 893	free_extent_map(split2);
 894}
 895
 896/*
 897 * Replace a range in the inode's extent map tree with a new extent map.
 898 *
 899 * @inode:      The target inode.
 900 * @new_em:     The new extent map to add to the inode's extent map tree.
 901 * @modified:   Indicate if the new extent map should be added to the list of
 902 *              modified extents (for fast fsync tracking).
 903 *
 904 * Drops all the extent maps in the inode's extent map tree that intersect the
 905 * range of the new extent map and adds the new extent map to the tree.
 906 * The caller should have locked an appropriate file range in the inode's io
 907 * tree before calling this function.
 908 */
 909int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
 910				   struct extent_map *new_em,
 911				   bool modified)
 912{
 913	const u64 end = new_em->start + new_em->len - 1;
 914	struct extent_map_tree *tree = &inode->extent_tree;
 915	int ret;
 916
 917	ASSERT(!extent_map_in_tree(new_em));
 918
 919	/*
 920	 * The caller has locked an appropriate file range in the inode's io
 921	 * tree, but getting -EEXIST when adding the new extent map can still
 922	 * happen in case there are extents that partially cover the range, and
 923	 * this is due to two tasks operating on different parts of the extent.
 924	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
 925	 * btrfs_get_extent") for an example and details.
 926	 */
 927	do {
 928		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
 929		write_lock(&tree->lock);
 930		ret = add_extent_mapping(tree, new_em, modified);
 931		write_unlock(&tree->lock);
 932	} while (ret == -EEXIST);
 933
 934	return ret;
 935}
 936
 937/*
 938 * Split off the first pre bytes from the extent_map at [start, start + len],
 939 * and set the block_start for it to new_logical.
 940 *
 941 * This function is used when an ordered_extent needs to be split.
 942 */
 943int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
 944		     u64 new_logical)
 945{
 946	struct extent_map_tree *em_tree = &inode->extent_tree;
 947	struct extent_map *em;
 948	struct extent_map *split_pre = NULL;
 949	struct extent_map *split_mid = NULL;
 950	int ret = 0;
 951	unsigned long flags;
 952
 953	ASSERT(pre != 0);
 954	ASSERT(pre < len);
 955
 956	split_pre = alloc_extent_map();
 957	if (!split_pre)
 958		return -ENOMEM;
 959	split_mid = alloc_extent_map();
 960	if (!split_mid) {
 961		ret = -ENOMEM;
 962		goto out_free_pre;
 963	}
 964
 965	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
 966	write_lock(&em_tree->lock);
 967	em = lookup_extent_mapping(em_tree, start, len);
 968	if (!em) {
 969		ret = -EIO;
 970		goto out_unlock;
 971	}
 972
 973	ASSERT(em->len == len);
 974	ASSERT(!extent_map_is_compressed(em));
 975	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
 976	ASSERT(em->flags & EXTENT_FLAG_PINNED);
 977	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
 978	ASSERT(!list_empty(&em->list));
 979
 980	flags = em->flags;
 981	em->flags &= ~EXTENT_FLAG_PINNED;
 982
 983	/* First, replace the em with a new extent_map starting from em->start. */
 984	split_pre->start = em->start;
 985	split_pre->len = pre;
 986	split_pre->orig_start = split_pre->start;
 987	split_pre->block_start = new_logical;
 988	split_pre->block_len = split_pre->len;
 989	split_pre->orig_block_len = split_pre->block_len;
 990	split_pre->ram_bytes = split_pre->len;
 991	split_pre->flags = flags;
 992	split_pre->generation = em->generation;
 993
 994	replace_extent_mapping(em_tree, em, split_pre, 1);
 995
 996	/*
 997	 * Now we only have an extent_map at:
 998	 *     [em->start, em->start + pre]
 999	 */
1000
1001	/* Insert the middle extent_map. */
1002	split_mid->start = em->start + pre;
1003	split_mid->len = em->len - pre;
1004	split_mid->orig_start = split_mid->start;
1005	split_mid->block_start = em->block_start + pre;
1006	split_mid->block_len = split_mid->len;
1007	split_mid->orig_block_len = split_mid->block_len;
1008	split_mid->ram_bytes = split_mid->len;
1009	split_mid->flags = flags;
1010	split_mid->generation = em->generation;
1011	add_extent_mapping(em_tree, split_mid, 1);
1012
1013	/* Once for us */
1014	free_extent_map(em);
1015	/* Once for the tree */
1016	free_extent_map(em);
1017
1018out_unlock:
1019	write_unlock(&em_tree->lock);
1020	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
1021	free_extent_map(split_mid);
1022out_free_pre:
1023	free_extent_map(split_pre);
1024	return ret;
1025}
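/*
 * Usage sketch (hypothetical values): carving the first 64K off a pinned
 * 128K ordered extent mapping at file offset 1M, redirecting that prefix to
 * a new logical block address:
 *
 *	ret = split_extent_map(inode, SZ_1M, SZ_128K, SZ_64K, new_logical);
 *	if (ret)
 *		;	// -ENOMEM, or -EIO if the mapping was not found
 */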