v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2
  3#include <linux/err.h>
  4#include <linux/slab.h>
  5#include <linux/spinlock.h>
  6#include "messages.h"
  7#include "ctree.h"
  8#include "volumes.h"
  9#include "extent_map.h"
 10#include "compression.h"
 11#include "btrfs_inode.h"
 12
 13
 14static struct kmem_cache *extent_map_cache;
 15
 16int __init extent_map_init(void)
 17{
 18	extent_map_cache = kmem_cache_create("btrfs_extent_map",
 19			sizeof(struct extent_map), 0,
 20			SLAB_MEM_SPREAD, NULL);
 21	if (!extent_map_cache)
 22		return -ENOMEM;
 23	return 0;
 24}
 25
 26void __cold extent_map_exit(void)
 27{
 28	kmem_cache_destroy(extent_map_cache);
 29}
 30
 31/*
 32 * Initialize the extent tree @tree.  Should be called for each new inode or
 33 * other user of the extent_map interface.
 34 */
 35void extent_map_tree_init(struct extent_map_tree *tree)
 36{
 37	tree->map = RB_ROOT_CACHED;
 38	INIT_LIST_HEAD(&tree->modified_extents);
 39	rwlock_init(&tree->lock);
 40}
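A minimal caller-side sketch (an illustration, not code from this file; @start and @len are assumed byte offsets): the tree is protected by @tree->lock, and a lookup returns a referenced map that the caller must drop with free_extent_map():

	struct extent_map *em;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);
	if (em) {
		/* use em->start, em->len, em->block_start, ... */
		free_extent_map(em);
	}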
 41
 42/*
 43 * Allocate a new extent_map structure.  The new structure is returned with a
 44 * reference count of one and needs to be freed using free_extent_map()
 45 */
 46struct extent_map *alloc_extent_map(void)
 47{
 48	struct extent_map *em;
 49	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
 50	if (!em)
 51		return NULL;
 52	RB_CLEAR_NODE(&em->rb_node);
 53	em->compress_type = BTRFS_COMPRESS_NONE;
 54	refcount_set(&em->refs, 1);
 55	INIT_LIST_HEAD(&em->list);
 56	return em;
 57}
 58
 59/*
  60 * Drop the reference count on @em by one and free the structure if the reference
 61 * count hits zero.
 62 */
 63void free_extent_map(struct extent_map *em)
 64{
 65	if (!em)
 66		return;
 67	if (refcount_dec_and_test(&em->refs)) {
 68		WARN_ON(extent_map_in_tree(em));
 69		WARN_ON(!list_empty(&em->list));
 70		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
 71			kfree(em->map_lookup);
 72		kmem_cache_free(extent_map_cache, em);
 73	}
 74}
 75
 76/* Do the math around the end of an extent, handling wrapping. */
 77static u64 range_end(u64 start, u64 len)
 78{
 79	if (start + len < start)
 80		return (u64)-1;
 81	return start + len;
 82}
 83
 84static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
 85{
 86	struct rb_node **p = &root->rb_root.rb_node;
 87	struct rb_node *parent = NULL;
 88	struct extent_map *entry = NULL;
 89	struct rb_node *orig_parent = NULL;
 90	u64 end = range_end(em->start, em->len);
 91	bool leftmost = true;
 92
 93	while (*p) {
 94		parent = *p;
 95		entry = rb_entry(parent, struct extent_map, rb_node);
 96
 97		if (em->start < entry->start) {
 98			p = &(*p)->rb_left;
 99		} else if (em->start >= extent_map_end(entry)) {
100			p = &(*p)->rb_right;
101			leftmost = false;
102		} else {
103			return -EEXIST;
104		}
105	}
106
107	orig_parent = parent;
108	while (parent && em->start >= extent_map_end(entry)) {
109		parent = rb_next(parent);
110		entry = rb_entry(parent, struct extent_map, rb_node);
111	}
112	if (parent)
113		if (end > entry->start && em->start < extent_map_end(entry))
114			return -EEXIST;
115
116	parent = orig_parent;
117	entry = rb_entry(parent, struct extent_map, rb_node);
118	while (parent && em->start < entry->start) {
119		parent = rb_prev(parent);
120		entry = rb_entry(parent, struct extent_map, rb_node);
121	}
122	if (parent)
123		if (end > entry->start && em->start < extent_map_end(entry))
124			return -EEXIST;
125
126	rb_link_node(&em->rb_node, orig_parent, p);
127	rb_insert_color_cached(&em->rb_node, root, leftmost);
128	return 0;
129}
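An illustration of the overlap rule (values assumed, including the SZ_* constants from <linux/sizes.h>; suppose the tree already holds a map covering [0, 8K)):

	em->start = SZ_4K;
	em->len = SZ_8K;
	ret = tree_insert(&tree->map, em);	/* -EEXIST: [4K, 12K) overlaps [0, 8K) */

A map starting at SZ_8K, i.e. at extent_map_end() of the existing one, would insert successfully.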
130
131/*
132 * Search through the tree for an extent_map with a given offset.  If it can't
133 * be found, try to find some neighboring extents
134 */
135static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
136				     struct rb_node **prev_or_next_ret)
137{
138	struct rb_node *n = root->rb_node;
139	struct rb_node *prev = NULL;
140	struct rb_node *orig_prev = NULL;
141	struct extent_map *entry;
142	struct extent_map *prev_entry = NULL;
143
144	ASSERT(prev_or_next_ret);
145
146	while (n) {
147		entry = rb_entry(n, struct extent_map, rb_node);
148		prev = n;
149		prev_entry = entry;
150
151		if (offset < entry->start)
152			n = n->rb_left;
153		else if (offset >= extent_map_end(entry))
154			n = n->rb_right;
155		else
156			return n;
157	}
158
159	orig_prev = prev;
160	while (prev && offset >= extent_map_end(prev_entry)) {
161		prev = rb_next(prev);
162		prev_entry = rb_entry(prev, struct extent_map, rb_node);
163	}
164
165	/*
166	 * Previous extent map found, return as in this case the caller does not
167	 * care about the next one.
168	 */
169	if (prev) {
170		*prev_or_next_ret = prev;
171		return NULL;
172	}
173
174	prev = orig_prev;
175	prev_entry = rb_entry(prev, struct extent_map, rb_node);
176	while (prev && offset < prev_entry->start) {
177		prev = rb_prev(prev);
178		prev_entry = rb_entry(prev, struct extent_map, rb_node);
179	}
180	*prev_or_next_ret = prev;
181
182	return NULL;
183}
184
185/* Check to see if two extent_map structs are adjacent and safe to merge. */
186static int mergable_maps(struct extent_map *prev, struct extent_map *next)
187{
188	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
189		return 0;
190
191	/*
192	 * don't merge compressed extents, we need to know their
193	 * actual size
194	 */
195	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
196		return 0;
197
198	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
199	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
200		return 0;
201
202	/*
203	 * We don't want to merge stuff that hasn't been written to the log yet
204	 * since it may not reflect exactly what is on disk, and that would be
205	 * bad.
206	 */
207	if (!list_empty(&prev->list) || !list_empty(&next->list))
208		return 0;
209
210	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
211	       prev->block_start != EXTENT_MAP_DELALLOC);
212
213	if (prev->map_lookup || next->map_lookup)
214		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
215		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
216
217	if (extent_map_end(prev) == next->start &&
218	    prev->flags == next->flags &&
219	    prev->map_lookup == next->map_lookup &&
220	    ((next->block_start == EXTENT_MAP_HOLE &&
221	      prev->block_start == EXTENT_MAP_HOLE) ||
222	     (next->block_start == EXTENT_MAP_INLINE &&
223	      prev->block_start == EXTENT_MAP_INLINE) ||
224	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
225	      next->block_start == extent_map_block_end(prev)))) {
226		return 1;
227	}
228	return 0;
229}
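A worked example of the final test above (numbers assumed): two unpinned, uncompressed maps with identical flags merge when they are contiguous both in the file and on disk:

	prev: start = 0,  len = 4K, block_start = 1M
	next: start = 4K, len = 4K, block_start = 1M + 4K

	extent_map_end(prev) == 4K == next->start, and
	next->block_start == 1M + 4K == extent_map_block_end(prev),

so mergable_maps() returns 1 and the pair can collapse into a single map with start = 0, len = 8K, block_start = 1M.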
230
231static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
232{
233	struct extent_map *merge = NULL;
234	struct rb_node *rb;
235
236	/*
237	 * We can't modify an extent map that is in the tree and that is being
238	 * used by another task, as it can cause that other task to see it in
239	 * inconsistent state during the merging. We always have 1 reference for
240	 * the tree and 1 for this task (which is unpinning the extent map or
241	 * clearing the logging flag), so anything > 2 means it's being used by
242	 * other tasks too.
243	 */
244	if (refcount_read(&em->refs) > 2)
245		return;
246
247	if (em->start != 0) {
248		rb = rb_prev(&em->rb_node);
249		if (rb)
250			merge = rb_entry(rb, struct extent_map, rb_node);
251		if (rb && mergable_maps(merge, em)) {
252			em->start = merge->start;
253			em->orig_start = merge->orig_start;
254			em->len += merge->len;
255			em->block_len += merge->block_len;
256			em->block_start = merge->block_start;
257			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
258			em->mod_start = merge->mod_start;
259			em->generation = max(em->generation, merge->generation);
260			set_bit(EXTENT_FLAG_MERGED, &em->flags);
261
262			rb_erase_cached(&merge->rb_node, &tree->map);
263			RB_CLEAR_NODE(&merge->rb_node);
264			free_extent_map(merge);
265		}
266	}
267
268	rb = rb_next(&em->rb_node);
269	if (rb)
270		merge = rb_entry(rb, struct extent_map, rb_node);
271	if (rb && mergable_maps(em, merge)) {
272		em->len += merge->len;
273		em->block_len += merge->block_len;
274		rb_erase_cached(&merge->rb_node, &tree->map);
275		RB_CLEAR_NODE(&merge->rb_node);
276		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
277		em->generation = max(em->generation, merge->generation);
278		set_bit(EXTENT_FLAG_MERGED, &em->flags);
279		free_extent_map(merge);
280	}
281}
282
283/*
284 * Unpin an extent from the cache.
285 *
286 * @tree:	tree to unpin the extent in
287 * @start:	logical offset in the file
288 * @len:	length of the extent
289 * @gen:	generation that this extent has been modified in
290 *
291 * Called after an extent has been written to disk properly.  Set the generation
292 * to the generation that actually added the file item to the inode so we know
293 * we need to sync this extent when we call fsync().
294 */
295int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
296		       u64 gen)
297{
298	int ret = 0;
299	struct extent_map *em;
300	bool prealloc = false;
301
302	write_lock(&tree->lock);
303	em = lookup_extent_mapping(tree, start, len);
304
305	WARN_ON(!em || em->start != start);
306
307	if (!em)
308		goto out;
309
310	em->generation = gen;
311	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
312	em->mod_start = em->start;
313	em->mod_len = em->len;
314
315	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
316		prealloc = true;
317		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
318	}
319
320	try_merge_map(tree, em);
321
322	if (prealloc) {
323		em->mod_start = em->start;
324		em->mod_len = em->len;
325	}
326
327	free_extent_map(em);
328out:
329	write_unlock(&tree->lock);
330	return ret;
331
332}
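A sketch of the typical call site (the names ordered, trans and inode here are assumptions, e.g. when an ordered extent finishes writeback and its file extent item has been committed in a transaction):

	unpin_extent_cache(&inode->extent_tree, ordered->file_offset,
			   ordered->num_bytes, trans->transid);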
333
334void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
335{
336	lockdep_assert_held_write(&tree->lock);
337
338	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
339	if (extent_map_in_tree(em))
340		try_merge_map(tree, em);
341}
342
343static inline void setup_extent_mapping(struct extent_map_tree *tree,
344					struct extent_map *em,
345					int modified)
346{
347	refcount_inc(&em->refs);
348	em->mod_start = em->start;
349	em->mod_len = em->len;
350
351	if (modified)
352		list_move(&em->list, &tree->modified_extents);
353	else
354		try_merge_map(tree, em);
355}
356
357static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
358{
359	struct map_lookup *map = em->map_lookup;
360	u64 stripe_size = em->orig_block_len;
361	int i;
362
363	for (i = 0; i < map->num_stripes; i++) {
364		struct btrfs_io_stripe *stripe = &map->stripes[i];
365		struct btrfs_device *device = stripe->dev;
366
367		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
368				 stripe->physical + stripe_size - 1, bits);
369	}
370}
371
372static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
373{
374	struct map_lookup *map = em->map_lookup;
375	u64 stripe_size = em->orig_block_len;
376	int i;
377
378	for (i = 0; i < map->num_stripes; i++) {
379		struct btrfs_io_stripe *stripe = &map->stripes[i];
380		struct btrfs_device *device = stripe->dev;
381
382		__clear_extent_bit(&device->alloc_state, stripe->physical,
383				   stripe->physical + stripe_size - 1, bits,
384				   NULL, GFP_NOWAIT, NULL);
385	}
386}
387
388/*
389 * Add new extent map to the extent tree
390 *
391 * @tree:	tree to insert new map in
392 * @em:		map to insert
393 * @modified:	indicate whether the given @em should be added to the
394 *	        modified list, which indicates the extent needs to be logged
395 *
396 * Insert @em into @tree or perform a simple forward/backward merge with
397 * existing mappings.  The extent_map struct passed in will be inserted
398 * into the tree directly, with an additional reference taken, or a
399 * reference dropped if the merge attempt was successful.
400 */
401int add_extent_mapping(struct extent_map_tree *tree,
402		       struct extent_map *em, int modified)
403{
404	int ret = 0;
405
406	lockdep_assert_held_write(&tree->lock);
407
408	ret = tree_insert(&tree->map, em);
409	if (ret)
410		goto out;
411
412	setup_extent_mapping(tree, em, modified);
413	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
414		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
415		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
416	}
417out:
418	return ret;
419}
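A minimal usage sketch, assuming the caller has allocated and filled @em; the tree's write lock must be held, per the lockdep assertion above:

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em, 0);
	write_unlock(&tree->lock);
	/* on success one reference now belongs to the tree */
	free_extent_map(em);	/* drop the caller's own reference when done */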
420
421static struct extent_map *
422__lookup_extent_mapping(struct extent_map_tree *tree,
423			u64 start, u64 len, int strict)
424{
425	struct extent_map *em;
426	struct rb_node *rb_node;
427	struct rb_node *prev_or_next = NULL;
428	u64 end = range_end(start, len);
429
430	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
431	if (!rb_node) {
432		if (prev_or_next)
433			rb_node = prev_or_next;
434		else
435			return NULL;
436	}
437
438	em = rb_entry(rb_node, struct extent_map, rb_node);
439
440	if (strict && !(end > em->start && start < extent_map_end(em)))
441		return NULL;
442
443	refcount_inc(&em->refs);
444	return em;
445}
446
447/*
448 * Lookup extent_map that intersects @start + @len range.
449 *
450 * @tree:	tree to lookup in
451 * @start:	byte offset to start the search
452 * @len:	length of the lookup range
453 *
454 * Find and return the first extent_map struct in @tree that intersects the
455 * [start, len] range.  There may be additional objects in the tree that
456 * intersect, so check the object returned carefully to make sure that no
457 * additional lookups are needed.
458 */
459struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
460					 u64 start, u64 len)
461{
462	return __lookup_extent_mapping(tree, start, len, 1);
463}
464
465/*
466 * Find a nearby extent map intersecting @start + @len (not an exact search).
467 *
468 * @tree:	tree to lookup in
469 * @start:	byte offset to start the search
470 * @len:	length of the lookup range
471 *
472 * Find and return the first extent_map struct in @tree that intersects the
473 * [start, len] range.
474 *
475 * If one can't be found, any nearby extent may be returned
476 */
477struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
478					 u64 start, u64 len)
479{
480	return __lookup_extent_mapping(tree, start, len, 0);
481}
482
483/*
484 * Remove an extent_map from the extent tree.
485 *
486 * @tree:	extent tree to remove from
487 * @em:		extent map being removed
488 *
489 * Remove @em from @tree.  No reference counts are dropped, and no checks
490 * are done to see if the range is in use.
491 */
492void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
493{
494	lockdep_assert_held_write(&tree->lock);
495
496	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
497	rb_erase_cached(&em->rb_node, &tree->map);
498	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
499		list_del_init(&em->list);
500	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
501		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
502	RB_CLEAR_NODE(&em->rb_node);
503}
504
505void replace_extent_mapping(struct extent_map_tree *tree,
506			    struct extent_map *cur,
507			    struct extent_map *new,
508			    int modified)
509{
510	lockdep_assert_held_write(&tree->lock);
511
512	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
513	ASSERT(extent_map_in_tree(cur));
514	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
515		list_del_init(&cur->list);
516	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
517	RB_CLEAR_NODE(&cur->rb_node);
518
519	setup_extent_mapping(tree, new, modified);
520}
521
522static struct extent_map *next_extent_map(const struct extent_map *em)
523{
524	struct rb_node *next;
525
526	next = rb_next(&em->rb_node);
527	if (!next)
528		return NULL;
529	return container_of(next, struct extent_map, rb_node);
530}
531
532static struct extent_map *prev_extent_map(struct extent_map *em)
533{
534	struct rb_node *prev;
535
536	prev = rb_prev(&em->rb_node);
537	if (!prev)
538		return NULL;
539	return container_of(prev, struct extent_map, rb_node);
540}
541
542/*
 543 * Helper for btrfs_get_extent.  Given an existing extent in the tree
 544 * (the nearest extent to map_start) and a new extent that you want to
 545 * insert, deal with any overlap and insert the best-fitting trimmed
 546 * extent into the tree.
547 */
548static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
549					 struct extent_map *existing,
550					 struct extent_map *em,
551					 u64 map_start)
552{
553	struct extent_map *prev;
554	struct extent_map *next;
555	u64 start;
556	u64 end;
557	u64 start_diff;
558
559	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
560
561	if (existing->start > map_start) {
562		next = existing;
563		prev = prev_extent_map(next);
564	} else {
565		prev = existing;
566		next = next_extent_map(prev);
567	}
568
569	start = prev ? extent_map_end(prev) : em->start;
570	start = max_t(u64, start, em->start);
571	end = next ? next->start : extent_map_end(em);
572	end = min_t(u64, end, extent_map_end(em));
573	start_diff = start - em->start;
574	em->start = start;
575	em->len = end - start;
576	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
577	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
578		em->block_start += start_diff;
579		em->block_len = em->len;
580	}
581	return add_extent_mapping(em_tree, em, 0);
582}
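A worked trim (values assumed): if @em covers [0, 16K) and @existing covers [0, 4K) with no extent after it, then prev = @existing, start becomes 4K and end stays 16K, so @em is narrowed to [4K, 16K) and, for a regular uncompressed extent, its block_start advances by start_diff = 4K before the insert.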
583
584/*
585 * Add extent mapping into em_tree.
586 *
587 * @fs_info:  the filesystem
588 * @em_tree:  extent tree into which we want to insert the extent mapping
589 * @em_in:    extent we are inserting
590 * @start:    start of the logical range btrfs_get_extent() is requesting
591 * @len:      length of the logical range btrfs_get_extent() is requesting
592 *
593 * Note that @em_in's range may be different from [start, start+len),
594 * but they must be overlapped.
595 *
596 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
597 * the -EEXIST by either:
598 * a) Returning the existing extent in @em_in if @start is within the
599 *    existing em.
600 * b) Merge the existing extent with @em_in passed in.
601 *
602 * Return 0 on success, otherwise -EEXIST.
603 *
604 */
605int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
606			     struct extent_map_tree *em_tree,
607			     struct extent_map **em_in, u64 start, u64 len)
608{
609	int ret;
610	struct extent_map *em = *em_in;
611
612	/*
613	 * Tree-checker should have rejected any inline extent with non-zero
614	 * file offset. Here just do a sanity check.
615	 */
616	if (em->block_start == EXTENT_MAP_INLINE)
617		ASSERT(em->start == 0);
618
619	ret = add_extent_mapping(em_tree, em, 0);
620	/* it is possible that someone inserted the extent into the tree
621	 * while we had the lock dropped.  It is also possible that
622	 * an overlapping map exists in the tree
623	 */
624	if (ret == -EEXIST) {
625		struct extent_map *existing;
626
627		ret = 0;
628
629		existing = search_extent_mapping(em_tree, start, len);
630
631		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
632
633		/*
634		 * existing will always be non-NULL, since there must be
 635		 * an extent causing the -EEXIST.
636		 */
637		if (start >= existing->start &&
638		    start < extent_map_end(existing)) {
639			free_extent_map(em);
640			*em_in = existing;
641			ret = 0;
642		} else {
643			u64 orig_start = em->start;
644			u64 orig_len = em->len;
645
646			/*
647			 * The existing extent map is the one nearest to
648			 * the [start, start + len) range which overlaps
649			 */
650			ret = merge_extent_mapping(em_tree, existing,
651						   em, start);
652			if (ret) {
653				free_extent_map(em);
654				*em_in = NULL;
655				WARN_ONCE(ret,
656"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
657					  ret, existing->start, existing->len,
658					  orig_start, orig_len);
659			}
660			free_extent_map(existing);
661		}
662	}
663
664	ASSERT(ret == 0 || ret == -EEXIST);
665	return ret;
666}
667
668/*
669 * Drop all extent maps from a tree in the fastest possible way, rescheduling
670 * if needed. This avoids searching the tree, from the root down to the first
671 * extent map, before each deletion.
672 */
673static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
674{
675	write_lock(&tree->lock);
676	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
677		struct extent_map *em;
678		struct rb_node *node;
679
680		node = rb_first_cached(&tree->map);
681		em = rb_entry(node, struct extent_map, rb_node);
682		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
683		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
684		remove_extent_mapping(tree, em);
685		free_extent_map(em);
686		cond_resched_rwlock_write(&tree->lock);
687	}
688	write_unlock(&tree->lock);
689}
690
691/*
692 * Drop all extent maps in a given range.
693 *
694 * @inode:       The target inode.
695 * @start:       Start offset of the range.
696 * @end:         End offset of the range (inclusive value).
697 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
698 *
699 * This drops all the extent maps that intersect the given range [@start, @end].
700 * Extent maps that partially overlap the range and extend behind or beyond it,
701 * are split.
702 * The caller should have locked an appropriate file range in the inode's io
703 * tree before calling this function.
704 */
705void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
706				 bool skip_pinned)
707{
708	struct extent_map *split;
709	struct extent_map *split2;
710	struct extent_map *em;
711	struct extent_map_tree *em_tree = &inode->extent_tree;
712	u64 len = end - start + 1;
713
714	WARN_ON(end < start);
715	if (end == (u64)-1) {
716		if (start == 0 && !skip_pinned) {
717			drop_all_extent_maps_fast(em_tree);
718			return;
719		}
720		len = (u64)-1;
721	} else {
722		/* Make end offset exclusive for use in the loop below. */
723		end++;
724	}
725
726	/*
727	 * It's ok if we fail to allocate the extent maps, see the comment near
728	 * the bottom of the loop below. We only need two spare extent maps in
729	 * the worst case, where the first extent map that intersects our range
730	 * starts before the range and the last extent map that intersects our
731	 * range ends after our range (and they might be the same extent map),
732	 * because we need to split those two extent maps at the boundaries.
733	 */
734	split = alloc_extent_map();
735	split2 = alloc_extent_map();
736
737	write_lock(&em_tree->lock);
738	em = lookup_extent_mapping(em_tree, start, len);
739
740	while (em) {
741		/* extent_map_end() returns exclusive value (last byte + 1). */
742		const u64 em_end = extent_map_end(em);
743		struct extent_map *next_em = NULL;
744		u64 gen;
745		unsigned long flags;
746		bool modified;
747		bool compressed;
748
749		if (em_end < end) {
750			next_em = next_extent_map(em);
751			if (next_em) {
752				if (next_em->start < end)
753					refcount_inc(&next_em->refs);
754				else
755					next_em = NULL;
756			}
757		}
758
759		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
760			start = em_end;
761			if (end != (u64)-1)
762				len = start + len - em_end;
763			goto next;
764		}
 765		flags = em->flags;
 766		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 767		clear_bit(EXTENT_FLAG_LOGGING, &flags);
768		modified = !list_empty(&em->list);
769
770		/*
771		 * The extent map does not cross our target range, so no need to
772		 * split it, we can remove it directly.
773		 */
774		if (em->start >= start && em_end <= end)
775			goto remove_em;
776
 777
778		gen = em->generation;
779		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
780
781		if (em->start < start) {
782			if (!split) {
783				split = split2;
784				split2 = NULL;
785				if (!split)
786					goto remove_em;
787			}
788			split->start = em->start;
789			split->len = start - em->start;
790
791			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
792				split->orig_start = em->orig_start;
793				split->block_start = em->block_start;
794
795				if (compressed)
796					split->block_len = em->block_len;
797				else
798					split->block_len = split->len;
799				split->orig_block_len = max(split->block_len,
800						em->orig_block_len);
801				split->ram_bytes = em->ram_bytes;
802			} else {
803				split->orig_start = split->start;
804				split->block_len = 0;
805				split->block_start = em->block_start;
806				split->orig_block_len = 0;
807				split->ram_bytes = split->len;
808			}
809
810			split->generation = gen;
811			split->flags = flags;
812			split->compress_type = em->compress_type;
813			replace_extent_mapping(em_tree, em, split, modified);
814			free_extent_map(split);
815			split = split2;
816			split2 = NULL;
817		}
818		if (em_end > end) {
819			if (!split) {
820				split = split2;
821				split2 = NULL;
822				if (!split)
823					goto remove_em;
824			}
825			split->start = start + len;
826			split->len = em_end - (start + len);
827			split->block_start = em->block_start;
828			split->flags = flags;
829			split->compress_type = em->compress_type;
830			split->generation = gen;
831
832			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
833				split->orig_block_len = max(em->block_len,
834						    em->orig_block_len);
835
836				split->ram_bytes = em->ram_bytes;
837				if (compressed) {
838					split->block_len = em->block_len;
839					split->orig_start = em->orig_start;
840				} else {
841					const u64 diff = start + len - em->start;
842
843					split->block_len = split->len;
844					split->block_start += diff;
845					split->orig_start = em->orig_start;
846				}
847			} else {
848				split->ram_bytes = split->len;
849				split->orig_start = split->start;
850				split->block_len = 0;
851				split->orig_block_len = 0;
852			}
853
854			if (extent_map_in_tree(em)) {
855				replace_extent_mapping(em_tree, em, split,
856						       modified);
857			} else {
858				int ret;
859
860				ret = add_extent_mapping(em_tree, split,
861							 modified);
862				/* Logic error, shouldn't happen. */
863				ASSERT(ret == 0);
864				if (WARN_ON(ret != 0) && modified)
865					btrfs_set_inode_full_sync(inode);
866			}
867			free_extent_map(split);
868			split = NULL;
869		}
870remove_em:
871		if (extent_map_in_tree(em)) {
872			/*
873			 * If the extent map is still in the tree it means that
874			 * either of the following is true:
875			 *
876			 * 1) It fits entirely in our range (doesn't end beyond
877			 *    it or starts before it);
878			 *
879			 * 2) It starts before our range and/or ends after our
880			 *    range, and we were not able to allocate the extent
881			 *    maps for split operations, @split and @split2.
882			 *
883			 * If we are at case 2) then we just remove the entire
884			 * extent map - this is fine since if anyone needs it to
885			 * access the subranges outside our range, will just
886			 * load it again from the subvolume tree's file extent
887			 * item. However if the extent map was in the list of
888			 * modified extents, then we must mark the inode for a
889			 * full fsync, otherwise a fast fsync will miss this
890			 * extent if it's new and needs to be logged.
891			 */
892			if ((em->start < start || em_end > end) && modified) {
893				ASSERT(!split);
894				btrfs_set_inode_full_sync(inode);
895			}
896			remove_extent_mapping(em_tree, em);
897		}
898
899		/*
900		 * Once for the tree reference (we replaced or removed the
901		 * extent map from the tree).
902		 */
903		free_extent_map(em);
904next:
905		/* Once for us (for our lookup reference). */
906		free_extent_map(em);
907
908		em = next_em;
909	}
910
911	write_unlock(&em_tree->lock);
912
913	free_extent_map(split);
914	free_extent_map(split2);
915}
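An illustration of the split logic (ranges assumed): dropping [8K, 12K) from a single map covering [0, 16K) consumes both spare maps, leaving the two outer pieces:

	before:	|<----------- em: [0, 16K) ----------->|
	drop:	              [8K, 12K)
	after:	|<- [0, 8K) ->|          |<- [12K, 16K) ->|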
916
917/*
918 * Replace a range in the inode's extent map tree with a new extent map.
919 *
920 * @inode:      The target inode.
921 * @new_em:     The new extent map to add to the inode's extent map tree.
922 * @modified:   Indicate if the new extent map should be added to the list of
923 *              modified extents (for fast fsync tracking).
924 *
925 * Drops all the extent maps in the inode's extent map tree that intersect the
926 * range of the new extent map and adds the new extent map to the tree.
927 * The caller should have locked an appropriate file range in the inode's io
928 * tree before calling this function.
929 */
930int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
931				   struct extent_map *new_em,
932				   bool modified)
933{
934	const u64 end = new_em->start + new_em->len - 1;
935	struct extent_map_tree *tree = &inode->extent_tree;
936	int ret;
937
938	ASSERT(!extent_map_in_tree(new_em));
939
940	/*
941	 * The caller has locked an appropriate file range in the inode's io
942	 * tree, but getting -EEXIST when adding the new extent map can still
943	 * happen in case there are extents that partially cover the range, and
944	 * this is due to two tasks operating on different parts of the extent.
945	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
946	 * btrfs_get_extent") for an example and details.
947	 */
948	do {
949		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
950		write_lock(&tree->lock);
951		ret = add_extent_mapping(tree, new_em, modified);
952		write_unlock(&tree->lock);
953	} while (ret == -EEXIST);
954
955	return ret;
956}
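A minimal sketch of the replace helper (assumed context: the caller has locked the file range in the inode's io tree and holds a reference on @new_em):

	ret = btrfs_replace_extent_map_range(inode, new_em, true);
	ASSERT(ret == 0);	/* only transient -EEXIST occurs, and it is retried */
	free_extent_map(new_em);	/* drop the caller's reference */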
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/err.h>
   4#include <linux/slab.h>
   5#include <linux/spinlock.h>
   6#include "messages.h"
   7#include "ctree.h"
   8#include "extent_map.h"
   9#include "compression.h"
  10#include "btrfs_inode.h"
  11#include "disk-io.h"
  12
  13
  14static struct kmem_cache *extent_map_cache;
  15
  16int __init extent_map_init(void)
  17{
  18	extent_map_cache = kmem_cache_create("btrfs_extent_map",
  19					     sizeof(struct extent_map), 0, 0, NULL);
  20	if (!extent_map_cache)
  21		return -ENOMEM;
  22	return 0;
  23}
  24
  25void __cold extent_map_exit(void)
  26{
  27	kmem_cache_destroy(extent_map_cache);
  28}
  29
  30/*
  31 * Initialize the extent tree @tree.  Should be called for each new inode or
  32 * other user of the extent_map interface.
  33 */
  34void extent_map_tree_init(struct extent_map_tree *tree)
  35{
  36	tree->root = RB_ROOT;
  37	INIT_LIST_HEAD(&tree->modified_extents);
  38	rwlock_init(&tree->lock);
  39}
  40
  41/*
  42 * Allocate a new extent_map structure.  The new structure is returned with a
  43 * reference count of one and needs to be freed using free_extent_map()
  44 */
  45struct extent_map *alloc_extent_map(void)
  46{
  47	struct extent_map *em;
  48	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
  49	if (!em)
  50		return NULL;
  51	RB_CLEAR_NODE(&em->rb_node);
  52	refcount_set(&em->refs, 1);
  53	INIT_LIST_HEAD(&em->list);
  54	return em;
  55}
  56
  57/*
  58 * Drop the reference count on @em by one and free the structure if the reference
  59 * count hits zero.
  60 */
  61void free_extent_map(struct extent_map *em)
  62{
  63	if (!em)
  64		return;
  65	if (refcount_dec_and_test(&em->refs)) {
  66		WARN_ON(extent_map_in_tree(em));
  67		WARN_ON(!list_empty(&em->list));
  68		kmem_cache_free(extent_map_cache, em);
  69	}
  70}
  71
  72/* Do the math around the end of an extent, handling wrapping. */
  73static u64 range_end(u64 start, u64 len)
  74{
  75	if (start + len < start)
  76		return (u64)-1;
  77	return start + len;
  78}
  79
  80static void remove_em(struct btrfs_inode *inode, struct extent_map *em)
  81{
  82	struct btrfs_fs_info *fs_info = inode->root->fs_info;
  83
  84	rb_erase(&em->rb_node, &inode->extent_tree.root);
  85	RB_CLEAR_NODE(&em->rb_node);
  86
  87	if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root)))
  88		percpu_counter_dec(&fs_info->evictable_extent_maps);
  89}
  90
  91static int tree_insert(struct rb_root *root, struct extent_map *em)
  92{
  93	struct rb_node **p = &root->rb_node;
  94	struct rb_node *parent = NULL;
  95	struct extent_map *entry = NULL;
  96	struct rb_node *orig_parent = NULL;
  97	u64 end = range_end(em->start, em->len);
  98
  99	while (*p) {
 100		parent = *p;
 101		entry = rb_entry(parent, struct extent_map, rb_node);
 102
 103		if (em->start < entry->start)
 104			p = &(*p)->rb_left;
 105		else if (em->start >= extent_map_end(entry))
 106			p = &(*p)->rb_right;
 107		else
 108			return -EEXIST;
 109	}
 110
 111	orig_parent = parent;
 112	while (parent && em->start >= extent_map_end(entry)) {
 113		parent = rb_next(parent);
 114		entry = rb_entry(parent, struct extent_map, rb_node);
 115	}
 116	if (parent)
 117		if (end > entry->start && em->start < extent_map_end(entry))
 118			return -EEXIST;
 119
 120	parent = orig_parent;
 121	entry = rb_entry(parent, struct extent_map, rb_node);
 122	while (parent && em->start < entry->start) {
 123		parent = rb_prev(parent);
 124		entry = rb_entry(parent, struct extent_map, rb_node);
 125	}
 126	if (parent)
 127		if (end > entry->start && em->start < extent_map_end(entry))
 128			return -EEXIST;
 129
 130	rb_link_node(&em->rb_node, orig_parent, p);
 131	rb_insert_color(&em->rb_node, root);
 132	return 0;
 133}
 134
 135/*
 136 * Search through the tree for an extent_map with a given offset.  If it can't
 137 * be found, try to find some neighboring extents
 138 */
 139static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
 140				     struct rb_node **prev_or_next_ret)
 141{
 142	struct rb_node *n = root->rb_node;
 143	struct rb_node *prev = NULL;
 144	struct rb_node *orig_prev = NULL;
 145	struct extent_map *entry;
 146	struct extent_map *prev_entry = NULL;
 147
 148	ASSERT(prev_or_next_ret);
 149
 150	while (n) {
 151		entry = rb_entry(n, struct extent_map, rb_node);
 152		prev = n;
 153		prev_entry = entry;
 154
 155		if (offset < entry->start)
 156			n = n->rb_left;
 157		else if (offset >= extent_map_end(entry))
 158			n = n->rb_right;
 159		else
 160			return n;
 161	}
 162
 163	orig_prev = prev;
 164	while (prev && offset >= extent_map_end(prev_entry)) {
 165		prev = rb_next(prev);
 166		prev_entry = rb_entry(prev, struct extent_map, rb_node);
 167	}
 168
 169	/*
 170	 * Previous extent map found, return as in this case the caller does not
 171	 * care about the next one.
 172	 */
 173	if (prev) {
 174		*prev_or_next_ret = prev;
 175		return NULL;
 176	}
 177
 178	prev = orig_prev;
 179	prev_entry = rb_entry(prev, struct extent_map, rb_node);
 180	while (prev && offset < prev_entry->start) {
 181		prev = rb_prev(prev);
 182		prev_entry = rb_entry(prev, struct extent_map, rb_node);
 183	}
 184	*prev_or_next_ret = prev;
 185
 186	return NULL;
 187}
 188
 189static inline u64 extent_map_block_len(const struct extent_map *em)
 190{
 191	if (extent_map_is_compressed(em))
 192		return em->disk_num_bytes;
 193	return em->len;
 194}
 195
 196static inline u64 extent_map_block_end(const struct extent_map *em)
 197{
 198	const u64 block_start = extent_map_block_start(em);
 199	const u64 block_end = block_start + extent_map_block_len(em);
 200
 201	if (block_end < block_start)
 202		return (u64)-1;
 203
 204	return block_end;
 205}
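Example of the compressed case (numbers assumed): an extent that decompresses to 128K but occupies 16K on disk has len = 128K and disk_num_bytes = 16K, so extent_map_block_len() returns 16K and the on-disk end is extent_map_block_start(em) + 16K.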
 206
 207static bool can_merge_extent_map(const struct extent_map *em)
 208{
 209	if (em->flags & EXTENT_FLAG_PINNED)
 210		return false;
 211
 212	/* Don't merge compressed extents, we need to know their actual size. */
 213	if (extent_map_is_compressed(em))
 214		return false;
 215
 216	if (em->flags & EXTENT_FLAG_LOGGING)
 217		return false;
 218
 219	/*
 220	 * We don't want to merge stuff that hasn't been written to the log yet
 221	 * since it may not reflect exactly what is on disk, and that would be
 222	 * bad.
 223	 */
 224	if (!list_empty(&em->list))
 225		return false;
 226
 227	return true;
 228}
 229
 230/* Check to see if two extent_map structs are adjacent and safe to merge. */
 231static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
 232{
 233	if (extent_map_end(prev) != next->start)
 234		return false;
 235
 236	/*
 237	 * The merged flag is not an on-disk flag, it just indicates we had the
 238	 * extent maps of 2 (or more) adjacent extents merged, so factor it out.
 239	 */
 240	if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
 241	    (next->flags & ~EXTENT_FLAG_MERGED))
 242		return false;
 243
 244	if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
 245		return extent_map_block_start(next) == extent_map_block_end(prev);
 246
 247	/* HOLES and INLINE extents. */
 248	return next->disk_bytenr == prev->disk_bytenr;
 249}
 250
 251/*
 252 * Handle the on-disk data extents merge for @prev and @next.
 253 *
 254 * @prev:    left extent to merge
 255 * @next:    right extent to merge
 256 * @merged:  the extent we will not discard after the merge; updated with new values
 257 *
 258 * After this, one of the two extents is the new merged extent and the other is
 259 * removed from the tree and likely freed. Note that @merged is one of @prev/@next
 260 * so there is const/non-const aliasing occurring here.
 261 *
 262 * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
 263 * For now only uncompressed regular extent can be merged.
 264 */
 265static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next,
 266				 struct extent_map *merged)
 267{
 268	u64 new_disk_bytenr;
 269	u64 new_disk_num_bytes;
 270	u64 new_offset;
 271
 272	/* @prev and @next should not be compressed. */
 273	ASSERT(!extent_map_is_compressed(prev));
 274	ASSERT(!extent_map_is_compressed(next));
 275
 276	/*
 277	 * There are two different cases where @prev and @next can be merged.
 278	 *
 279	 * 1) They are referring to the same data extent:
 280	 *
 281	 * |<----- data extent A ----->|
 282	 *    |<- prev ->|<- next ->|
 283	 *
 284	 * 2) They are referring to different data extents but still adjacent:
 285	 *
 286	 * |<-- data extent A -->|<-- data extent B -->|
 287	 *            |<- prev ->|<- next ->|
 288	 *
 289	 * The calculation here always merges the data extents first, then updates
 290	 * @offset using the new data extents.
 291	 *
 292	 * For case 1), the merged data extent would be the same.
 293	 * For case 2), we just merge the two data extents into one.
 294	 */
 295	new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
 296	new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
 297				 next->disk_bytenr + next->disk_num_bytes) -
 298			     new_disk_bytenr;
 299	new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;
 300
 301	merged->disk_bytenr = new_disk_bytenr;
 302	merged->disk_num_bytes = new_disk_num_bytes;
 303	merged->ram_bytes = new_disk_num_bytes;
 304	merged->offset = new_offset;
 305}
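A worked case 2) merge (numbers assumed): @prev has disk_bytenr = 1M, disk_num_bytes = 64K, offset = 0 and @next has disk_bytenr = 1M + 64K, disk_num_bytes = 64K:

	new_disk_bytenr    = min(1M, 1M + 64K)              = 1M
	new_disk_num_bytes = max(1M + 64K, 1M + 128K) - 1M  = 128K
	new_offset         = 1M + 0 - 1M                    = 0

so @merged describes one 128K data extent at 1M, with the file data starting at offset 0 inside it.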
 306
 307static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
 308			    struct extent_map *em)
 309{
 310	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
 311		return;
 312	btrfs_crit(fs_info,
 313"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x",
 314		prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes,
 315		em->ram_bytes, em->offset, em->flags);
 316	ASSERT(0);
 317}
 318
 319/* Internal sanity checks for btrfs debug builds. */
 320static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)
 321{
 322	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
 323		return;
 324	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
 325		if (em->disk_num_bytes == 0)
 326			dump_extent_map(fs_info, "zero disk_num_bytes", em);
 327		if (em->offset + em->len > em->ram_bytes)
 328			dump_extent_map(fs_info, "ram_bytes too small", em);
 329		if (em->offset + em->len > em->disk_num_bytes &&
 330		    !extent_map_is_compressed(em))
 331			dump_extent_map(fs_info, "disk_num_bytes too small", em);
 332		if (!extent_map_is_compressed(em) &&
 333		    em->ram_bytes != em->disk_num_bytes)
 334			dump_extent_map(fs_info,
 335		"ram_bytes mismatch with disk_num_bytes for non-compressed em",
 336					em);
 337	} else if (em->offset) {
 338		dump_extent_map(fs_info, "non-zero offset for hole/inline", em);
 339	}
 340}
 341
 342static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
 343{
 344	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 345	struct extent_map *merge = NULL;
 346	struct rb_node *rb;
 347
 348	/*
 349	 * We can't modify an extent map that is in the tree and that is being
 350	 * used by another task, as it can cause that other task to see it in
 351	 * inconsistent state during the merging. We always have 1 reference for
 352	 * the tree and 1 for this task (which is unpinning the extent map or
 353	 * clearing the logging flag), so anything > 2 means it's being used by
 354	 * other tasks too.
 355	 */
 356	if (refcount_read(&em->refs) > 2)
 357		return;
 358
 359	if (!can_merge_extent_map(em))
 360		return;
 361
 362	if (em->start != 0) {
 363		rb = rb_prev(&em->rb_node);
 364		if (rb)
 365			merge = rb_entry(rb, struct extent_map, rb_node);
 366		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
 367			em->start = merge->start;
 368			em->len += merge->len;
 369			em->generation = max(em->generation, merge->generation);
 370
 371			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
 372				merge_ondisk_extents(merge, em, em);
 373			em->flags |= EXTENT_FLAG_MERGED;
 374
 375			validate_extent_map(fs_info, em);
 376			remove_em(inode, merge);
 377			free_extent_map(merge);
 378		}
 379	}
 380
 381	rb = rb_next(&em->rb_node);
 382	if (rb)
 383		merge = rb_entry(rb, struct extent_map, rb_node);
 384	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
 385		em->len += merge->len;
 386		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
 387			merge_ondisk_extents(em, merge, em);
 388		validate_extent_map(fs_info, em);
 389		em->generation = max(em->generation, merge->generation);
 390		em->flags |= EXTENT_FLAG_MERGED;
 391		remove_em(inode, merge);
 392		free_extent_map(merge);
 393	}
 394}
 395
 396/*
 397 * Unpin an extent from the cache.
 398 *
 399 * @inode:	the inode from which we are unpinning an extent range
 400 * @start:	logical offset in the file
 401 * @len:	length of the extent
 402 * @gen:	generation that this extent has been modified in
 403 *
 404 * Called after an extent has been written to disk properly.  Set the generation
 405 * to the generation that actually added the file item to the inode so we know
 406 * we need to sync this extent when we call fsync().
 407 *
 408 * Returns: 0	     on success
 409 * 	    -ENOENT  when the extent is not found in the tree
 410 * 	    -EUCLEAN if the found extent does not match the expected start
 411 */
 412int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
 413{
 414	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 415	struct extent_map_tree *tree = &inode->extent_tree;
 416	int ret = 0;
 417	struct extent_map *em;
 418
 419	write_lock(&tree->lock);
 420	em = lookup_extent_mapping(tree, start, len);
 421
 422	if (WARN_ON(!em)) {
 423		btrfs_warn(fs_info,
 424"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
 425			   btrfs_ino(inode), btrfs_root_id(inode->root),
 426			   start, start + len, gen);
 427		ret = -ENOENT;
 428		goto out;
 429	}
 430
 431	if (WARN_ON(em->start != start)) {
 432		btrfs_warn(fs_info,
 433"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
 434			   btrfs_ino(inode), btrfs_root_id(inode->root),
 435			   em->start, start, start + len, gen);
 436		ret = -EUCLEAN;
 437		goto out;
 438	}
 439
 440	em->generation = gen;
 441	em->flags &= ~EXTENT_FLAG_PINNED;
 442
 443	try_merge_map(inode, em);
 444
 445out:
 446	write_unlock(&tree->lock);
 447	free_extent_map(em);
 448	return ret;
 449
 450}
 451
 452void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
 453{
 454	lockdep_assert_held_write(&inode->extent_tree.lock);
 455
 456	em->flags &= ~EXTENT_FLAG_LOGGING;
 457	if (extent_map_in_tree(em))
 458		try_merge_map(inode, em);
 459}
 460
 461static inline void setup_extent_mapping(struct btrfs_inode *inode,
 462					struct extent_map *em,
 463					int modified)
 464{
 465	refcount_inc(&em->refs);
 466
 467	ASSERT(list_empty(&em->list));
 468
 469	if (modified)
 470		list_add(&em->list, &inode->extent_tree.modified_extents);
 471	else
 472		try_merge_map(inode, em);
 473}
 474
 475/*
 476 * Add a new extent map to an inode's extent map tree.
 477 *
 478 * @inode:	the target inode
 479 * @em:		map to insert
 480 * @modified:	indicate whether the given @em should be added to the
 481 *	        modified list, which indicates the extent needs to be logged
 482 *
 483 * Insert @em into the @inode's extent map tree or perform a simple
 484 * forward/backward merge with existing mappings.  The extent_map struct passed
 485 * in will be inserted into the tree directly, with an additional reference
 486 * taken, or a reference dropped if the merge attempt was successful.
 487 */
 488static int add_extent_mapping(struct btrfs_inode *inode,
 489			      struct extent_map *em, int modified)
 490{
 491	struct extent_map_tree *tree = &inode->extent_tree;
 492	struct btrfs_root *root = inode->root;
 493	struct btrfs_fs_info *fs_info = root->fs_info;
 494	int ret;
 495
 496	lockdep_assert_held_write(&tree->lock);
 497
 498	validate_extent_map(fs_info, em);
 499	ret = tree_insert(&tree->root, em);
 500	if (ret)
 501		return ret;
 502
 503	setup_extent_mapping(inode, em, modified);
 504
 505	if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root)))
 506		percpu_counter_inc(&fs_info->evictable_extent_maps);
 507
 508	return 0;
 509}
 510
 511static struct extent_map *
 512__lookup_extent_mapping(struct extent_map_tree *tree,
 513			u64 start, u64 len, int strict)
 514{
 515	struct extent_map *em;
 516	struct rb_node *rb_node;
 517	struct rb_node *prev_or_next = NULL;
 518	u64 end = range_end(start, len);
 519
 520	rb_node = __tree_search(&tree->root, start, &prev_or_next);
 521	if (!rb_node) {
 522		if (prev_or_next)
 523			rb_node = prev_or_next;
 524		else
 525			return NULL;
 526	}
 527
 528	em = rb_entry(rb_node, struct extent_map, rb_node);
 529
 530	if (strict && !(end > em->start && start < extent_map_end(em)))
 531		return NULL;
 532
 533	refcount_inc(&em->refs);
 534	return em;
 535}
 536
 537/*
 538 * Lookup extent_map that intersects @start + @len range.
 539 *
 540 * @tree:	tree to lookup in
 541 * @start:	byte offset to start the search
 542 * @len:	length of the lookup range
 543 *
 544 * Find and return the first extent_map struct in @tree that intersects the
 545 * [start, len] range.  There may be additional objects in the tree that
 546 * intersect, so check the object returned carefully to make sure that no
 547 * additional lookups are needed.
 548 */
 549struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 550					 u64 start, u64 len)
 551{
 552	return __lookup_extent_mapping(tree, start, len, 1);
 553}
 554
 555/*
 556 * Find a nearby extent map intersecting @start + @len (not an exact search).
 557 *
 558 * @tree:	tree to lookup in
 559 * @start:	byte offset to start the search
 560 * @len:	length of the lookup range
 561 *
 562 * Find and return the first extent_map struct in @tree that intersects the
 563 * [start, len] range.
 564 *
 565 * If one can't be found, any nearby extent may be returned
 566 */
 567struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
 568					 u64 start, u64 len)
 569{
 570	return __lookup_extent_mapping(tree, start, len, 0);
 571}
 572
 573/*
 574 * Remove an extent_map from its inode's extent tree.
 575 *
 576 * @inode:	the inode the extent map belongs to
 577 * @em:		extent map being removed
 578 *
 579 * Remove @em from the extent tree of @inode.  No reference counts are dropped,
 580 * and no checks are done to see if the range is in use.
 581 */
 582void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
 583{
 584	struct extent_map_tree *tree = &inode->extent_tree;
 585
 586	lockdep_assert_held_write(&tree->lock);
 587
 588	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
 589	if (!(em->flags & EXTENT_FLAG_LOGGING))
 590		list_del_init(&em->list);
 591
 592	remove_em(inode, em);
 593}
 594
 595static void replace_extent_mapping(struct btrfs_inode *inode,
 596				   struct extent_map *cur,
 597				   struct extent_map *new,
 598				   int modified)
 599{
 600	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 601	struct extent_map_tree *tree = &inode->extent_tree;
 602
 603	lockdep_assert_held_write(&tree->lock);
 604
 605	validate_extent_map(fs_info, new);
 606
 607	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
 608	ASSERT(extent_map_in_tree(cur));
 609	if (!(cur->flags & EXTENT_FLAG_LOGGING))
 610		list_del_init(&cur->list);
 611	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
 612	RB_CLEAR_NODE(&cur->rb_node);
 613
 614	setup_extent_mapping(inode, new, modified);
 615}
 616
 617static struct extent_map *next_extent_map(const struct extent_map *em)
 618{
 619	struct rb_node *next;
 620
 621	next = rb_next(&em->rb_node);
 622	if (!next)
 623		return NULL;
 624	return container_of(next, struct extent_map, rb_node);
 625}
 626
 627static struct extent_map *prev_extent_map(struct extent_map *em)
 628{
 629	struct rb_node *prev;
 630
 631	prev = rb_prev(&em->rb_node);
 632	if (!prev)
 633		return NULL;
 634	return container_of(prev, struct extent_map, rb_node);
 635}
 636
 637/*
 638 * Helper for btrfs_get_extent.  Given an existing extent in the tree
 639 * (the nearest extent to map_start) and a new extent that you want to
 640 * insert, deal with any overlap and insert the best-fitting trimmed
 641 * extent into the tree.
 642 */
 643static noinline int merge_extent_mapping(struct btrfs_inode *inode,
 644					 struct extent_map *existing,
 645					 struct extent_map *em,
 646					 u64 map_start)
 647{
 648	struct extent_map *prev;
 649	struct extent_map *next;
 650	u64 start;
 651	u64 end;
 652	u64 start_diff;
 653
 654	if (map_start < em->start || map_start >= extent_map_end(em))
 655		return -EINVAL;
 656
 657	if (existing->start > map_start) {
 658		next = existing;
 659		prev = prev_extent_map(next);
 660	} else {
 661		prev = existing;
 662		next = next_extent_map(prev);
 663	}
 664
 665	start = prev ? extent_map_end(prev) : em->start;
 666	start = max_t(u64, start, em->start);
 667	end = next ? next->start : extent_map_end(em);
 668	end = min_t(u64, end, extent_map_end(em));
 669	start_diff = start - em->start;
 670	em->start = start;
 671	em->len = end - start;
 672	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
 673		em->offset += start_diff;
 674	return add_extent_mapping(inode, em, 0);
 675}
 676
 677/*
 678 * Add extent mapping into an inode's extent map tree.
 679 *
 680 * @inode:    target inode
 681 * @em_in:    extent we are inserting
 682 * @start:    start of the logical range btrfs_get_extent() is requesting
 683 * @len:      length of the logical range btrfs_get_extent() is requesting
 684 *
 685 * Note that @em_in's range may be different from [start, start+len),
 686 * but they must be overlapped.
 687 *
 688 * Insert @em_in into the inode's extent map tree. In case there is an
 689 * overlapping range, handle the -EEXIST by either:
 690 * a) Returning the existing extent in @em_in if @start is within the
 691 *    existing em.
 692 * b) Merge the existing extent with @em_in passed in.
 693 *
 694 * Return 0 on success, otherwise -EEXIST.
 695 *
 696 */
 697int btrfs_add_extent_mapping(struct btrfs_inode *inode,
 698			     struct extent_map **em_in, u64 start, u64 len)
 699{
 700	int ret;
 701	struct extent_map *em = *em_in;
 702	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 703
 704	/*
 705	 * Tree-checker should have rejected any inline extent with non-zero
 706	 * file offset. Here just do a sanity check.
 707	 */
 708	if (em->disk_bytenr == EXTENT_MAP_INLINE)
 709		ASSERT(em->start == 0);
 710
 711	ret = add_extent_mapping(inode, em, 0);
 712	/* it is possible that someone inserted the extent into the tree
 713	 * while we had the lock dropped.  It is also possible that
 714	 * an overlapping map exists in the tree
 715	 */
 716	if (ret == -EEXIST) {
 717		struct extent_map *existing;
 718
 719		existing = search_extent_mapping(&inode->extent_tree, start, len);
 720
 721		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
 722
 723		/*
 724		 * existing will always be non-NULL, since there must be
 725		 * an extent causing the -EEXIST.
 726		 */
 727		if (start >= existing->start &&
 728		    start < extent_map_end(existing)) {
 729			free_extent_map(em);
 730			*em_in = existing;
 731			ret = 0;
 732		} else {
 733			u64 orig_start = em->start;
 734			u64 orig_len = em->len;
 735
 736			/*
 737			 * The existing extent map is the one nearest to
 738			 * the [start, start + len) range which overlaps
 739			 */
 740			ret = merge_extent_mapping(inode, existing, em, start);
 741			if (WARN_ON(ret)) {
 742				free_extent_map(em);
 743				*em_in = NULL;
 744				btrfs_warn(fs_info,
 745"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
 746					   existing->start, extent_map_end(existing),
 747					   orig_start, orig_start + orig_len, start);
 748			}
 749			free_extent_map(existing);
 750		}
 751	}
 752
 753	ASSERT(ret == 0 || ret == -EEXIST);
 754	return ret;
 755}
 756
 757/*
 758 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 759 * if needed. This avoids searching the tree, from the root down to the first
 760 * extent map, before each deletion.
 761 */
 762static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
 763{
 764	struct extent_map_tree *tree = &inode->extent_tree;
 765	struct rb_node *node;
 766
 767	write_lock(&tree->lock);
 768	node = rb_first(&tree->root);
 769	while (node) {
 770		struct extent_map *em;
 771		struct rb_node *next = rb_next(node);
 772
 773		em = rb_entry(node, struct extent_map, rb_node);
 774		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
 775		remove_extent_mapping(inode, em);
 776		free_extent_map(em);
 777
 778		if (cond_resched_rwlock_write(&tree->lock))
 779			node = rb_first(&tree->root);
 780		else
 781			node = next;
 782	}
 783	write_unlock(&tree->lock);
 784}
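Note on the restart above: cond_resched_rwlock_write() drops and reacquires @tree->lock when it actually reschedules, so other tasks may have modified the tree in the meantime; restarting from rb_first() instead of trusting the saved @next keeps the iteration safe.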
 785
 786/*
 787 * Drop all extent maps in a given range.
 788 *
 789 * @inode:       The target inode.
 790 * @start:       Start offset of the range.
 791 * @end:         End offset of the range (inclusive value).
 792 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 793 *
 794 * This drops all the extent maps that intersect the given range [@start, @end].
 795 * Extent maps that partially overlap the range and extend behind or beyond it,
 796 * are split.
 797 * The caller should have locked an appropriate file range in the inode's io
 798 * tree before calling this function.
 799 */
 800void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
 801				 bool skip_pinned)
 802{
 803	struct extent_map *split;
 804	struct extent_map *split2;
 805	struct extent_map *em;
 806	struct extent_map_tree *em_tree = &inode->extent_tree;
 807	u64 len = end - start + 1;
 808
 809	WARN_ON(end < start);
 810	if (end == (u64)-1) {
 811		if (start == 0 && !skip_pinned) {
 812			drop_all_extent_maps_fast(inode);
 813			return;
 814		}
 815		len = (u64)-1;
 816	} else {
 817		/* Make end offset exclusive for use in the loop below. */
 818		end++;
 819	}
 820
 821	/*
 822	 * It's ok if we fail to allocate the extent maps, see the comment near
 823	 * the bottom of the loop below. We only need two spare extent maps in
 824	 * the worst case, where the first extent map that intersects our range
 825	 * starts before the range and the last extent map that intersects our
 826	 * range ends after our range (and they might be the same extent map),
 827	 * because we need to split those two extent maps at the boundaries.
 828	 */
 829	split = alloc_extent_map();
 830	split2 = alloc_extent_map();
 831
 832	write_lock(&em_tree->lock);
 833	em = lookup_extent_mapping(em_tree, start, len);
 834
 835	while (em) {
 836		/* extent_map_end() returns exclusive value (last byte + 1). */
 837		const u64 em_end = extent_map_end(em);
 838		struct extent_map *next_em = NULL;
 839		u64 gen;
 840		unsigned long flags;
 841		bool modified;
 842
 843		if (em_end < end) {
 844			next_em = next_extent_map(em);
 845			if (next_em) {
 846				if (next_em->start < end)
 847					refcount_inc(&next_em->refs);
 848				else
 849					next_em = NULL;
 850			}
 851		}
 852
 853		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
 854			start = em_end;
 855			goto next;
 856		}
 857
 858		flags = em->flags;
 859		/*
 860		 * In case we split the extent map, we want to preserve the
 861		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
 862		 * it on the new extent maps.
 863		 */
 864		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
 865		modified = !list_empty(&em->list);
 866
 867		/*
 868		 * The extent map does not cross our target range, so no need to
 869		 * split it, we can remove it directly.
 870		 */
 871		if (em->start >= start && em_end <= end)
 872			goto remove_em;
 873
 874		gen = em->generation;
 875
 876		if (em->start < start) {
 877			if (!split) {
 878				split = split2;
 879				split2 = NULL;
 880				if (!split)
 881					goto remove_em;
 882			}
 883			split->start = em->start;
 884			split->len = start - em->start;
 885
 886			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
 887				split->disk_bytenr = em->disk_bytenr;
 888				split->disk_num_bytes = em->disk_num_bytes;
 889				split->offset = em->offset;
 890				split->ram_bytes = em->ram_bytes;
 891			} else {
 892				split->disk_bytenr = em->disk_bytenr;
 893				split->disk_num_bytes = 0;
 894				split->offset = 0;
 895				split->ram_bytes = split->len;
 896			}
 897
 898			split->generation = gen;
 899			split->flags = flags;
 900			replace_extent_mapping(inode, em, split, modified);
 901			free_extent_map(split);
 902			split = split2;
 903			split2 = NULL;
 904		}
 905		if (em_end > end) {
 906			if (!split) {
 907				split = split2;
 908				split2 = NULL;
 909				if (!split)
 910					goto remove_em;
 911			}
 912			split->start = end;
 913			split->len = em_end - end;
 914			split->disk_bytenr = em->disk_bytenr;
 915			split->flags = flags;
 916			split->generation = gen;
 917
 918			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
 919				split->disk_num_bytes = em->disk_num_bytes;
 920				split->offset = em->offset + end - em->start;
 921				split->ram_bytes = em->ram_bytes;
 922			} else {
 923				split->disk_num_bytes = 0;
 924				split->offset = 0;
 925				split->ram_bytes = split->len;
 926			}
 927
 928			if (extent_map_in_tree(em)) {
 929				replace_extent_mapping(inode, em, split, modified);
 930			} else {
 931				int ret;
 932
 933				ret = add_extent_mapping(inode, split, modified);
 934				/* Logic error, shouldn't happen. */
 935				ASSERT(ret == 0);
 936				if (WARN_ON(ret != 0) && modified)
 937					btrfs_set_inode_full_sync(inode);
 938			}
 939			free_extent_map(split);
 940			split = NULL;
 941		}
 942remove_em:
 943		if (extent_map_in_tree(em)) {
 944			/*
 945			 * If the extent map is still in the tree it means that
 946			 * either of the following is true:
 947			 *
 948			 * 1) It fits entirely in our range (doesn't end beyond
 949			 *    it or starts before it);
 950			 *
 951			 * 2) It starts before our range and/or ends after our
 952			 *    range, and we were not able to allocate the extent
 953			 *    maps for split operations, @split and @split2.
 954			 *
 955			 * If we are at case 2) then we just remove the entire
 956			 * extent map - this is fine since anyone who needs to
 957			 * access the subranges outside our range will just
 958			 * load it again from the subvolume tree's file extent
 959			 * item. However, if the extent map was in the list of
 960			 * modified extents, then we must mark the inode for a
 961			 * full fsync, otherwise a fast fsync will miss this
 962			 * extent if it's new and needs to be logged.
 963			 */
 964			if ((em->start < start || em_end > end) && modified) {
 965				ASSERT(!split);
 966				btrfs_set_inode_full_sync(inode);
 967			}
 968			remove_extent_mapping(inode, em);
 969		}
 970
 971		/*
 972		 * Once for the tree reference (we replaced or removed the
 973		 * extent map from the tree).
 974		 */
 975		free_extent_map(em);
 976next:
 977		/* Once for us (for our lookup reference). */
 978		free_extent_map(em);
 979
 980		em = next_em;
 981	}
 982
 983	write_unlock(&em_tree->lock);
 984
 985	free_extent_map(split);
 986	free_extent_map(split2);
 987}
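/*
 * Worked example for the splitting logic above (all numbers made up, the
 * demo_* name is hypothetical): with one extent map covering [0, 256K) and
 * a drop range of [64K, 128K - 1], the first split keeps [0, 64K) and the
 * second keeps [128K, 256K), with @offset shifted by 128K for the second
 * piece when the extent is a regular (non-inline) one.
 */
#if 0
static void demo_drop_middle(struct btrfs_inode *inode)
{
	const u64 start = SZ_64K;
	const u64 end = SZ_128K - 1;	/* Inclusive end offset. */

	/* The caller must have locked [start, end] in the io tree. */
	btrfs_drop_extent_map_range(inode, start, end, false);
}
#endif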
 988
 989/*
 990 * Replace a range in the inode's extent map tree with a new extent map.
 991 *
 992 * @inode:      The target inode.
 993 * @new_em:     The new extent map to add to the inode's extent map tree.
 994 * @modified:   Indicate if the new extent map should be added to the list of
 995 *              modified extents (for fast fsync tracking).
 996 *
 997 * Drops all the extent maps in the inode's extent map tree that intersect the
 998 * range of the new extent map and adds the new extent map to the tree.
 999 * The caller should have locked an appropriate file range in the inode's io
1000 * tree before calling this function.
1001 */
1002int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
1003				   struct extent_map *new_em,
1004				   bool modified)
1005{
1006	const u64 end = new_em->start + new_em->len - 1;
1007	struct extent_map_tree *tree = &inode->extent_tree;
1008	int ret;
1009
1010	ASSERT(!extent_map_in_tree(new_em));
1011
1012	/*
1013	 * The caller has locked an appropriate file range in the inode's io
1014	 * tree, but getting -EEXIST when adding the new extent map can still
1015	 * happen in case there are extents that partially cover the range, and
1016	 * this is due to two tasks operating on different parts of the extent.
1017	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
1018	 * btrfs_get_extent") for an example and details.
1019	 */
1020	do {
1021		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
1022		write_lock(&tree->lock);
1023		ret = add_extent_mapping(inode, new_em, modified);
1024		write_unlock(&tree->lock);
1025	} while (ret == -EEXIST);
1026
1027	return ret;
1028}
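/*
 * Sketch of a hypothetical caller (the demo_* name and field values are
 * placeholders, not taken from real allocation code): build a new extent
 * map, let btrfs_replace_extent_map_range() evict whatever it overlaps,
 * then drop the allocation reference since the tree holds its own.
 */
#if 0
static int demo_replace_range(struct btrfs_inode *inode, u64 start, u64 len,
			      u64 disk_bytenr)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = len;
	em->disk_bytenr = disk_bytenr;
	em->disk_num_bytes = len;
	em->ram_bytes = len;

	ret = btrfs_replace_extent_map_range(inode, em, true);
	free_extent_map(em);	/* Drop our reference; the tree keeps its own. */
	return ret;
}
#endif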
1029
1030/*
1031 * Split off the first @pre bytes from the extent_map at [@start, @start + @len)
1032 * and set its disk_bytenr to @new_logical.
1033 *
1034 * This function is used when an ordered_extent needs to be split.
1035 */
1036int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
1037		     u64 new_logical)
1038{
1039	struct extent_map_tree *em_tree = &inode->extent_tree;
1040	struct extent_map *em;
1041	struct extent_map *split_pre = NULL;
1042	struct extent_map *split_mid = NULL;
1043	int ret = 0;
1044	unsigned long flags;
1045
1046	ASSERT(pre != 0);
1047	ASSERT(pre < len);
1048
1049	split_pre = alloc_extent_map();
1050	if (!split_pre)
1051		return -ENOMEM;
1052	split_mid = alloc_extent_map();
1053	if (!split_mid) {
1054		ret = -ENOMEM;
1055		goto out_free_pre;
1056	}
1057
1058	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
1059	write_lock(&em_tree->lock);
1060	em = lookup_extent_mapping(em_tree, start, len);
1061	if (!em) {
1062		ret = -EIO;
1063		goto out_unlock;
1064	}
1065
1066	ASSERT(em->len == len);
1067	ASSERT(!extent_map_is_compressed(em));
1068	ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
1069	ASSERT(em->flags & EXTENT_FLAG_PINNED);
1070	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
1071	ASSERT(!list_empty(&em->list));
1072
1073	flags = em->flags;
1074	em->flags &= ~EXTENT_FLAG_PINNED;
1075
1076	/* First, replace the em with a new extent_map starting from em->start. */
1077	split_pre->start = em->start;
1078	split_pre->len = pre;
1079	split_pre->disk_bytenr = new_logical;
1080	split_pre->disk_num_bytes = split_pre->len;
1081	split_pre->offset = 0;
1082	split_pre->ram_bytes = split_pre->len;
1083	split_pre->flags = flags;
1084	split_pre->generation = em->generation;
1085
1086	replace_extent_mapping(inode, em, split_pre, 1);
1087
1088	/*
1089	 * Now we only have an extent_map at:
1090	 *     [em->start, em->start + pre)
1091	 */
1092
1093	/* Insert the middle extent_map. */
1094	split_mid->start = em->start + pre;
1095	split_mid->len = em->len - pre;
1096	split_mid->disk_bytenr = extent_map_block_start(em) + pre;
1097	split_mid->disk_num_bytes = split_mid->len;
1098	split_mid->offset = 0;
1099	split_mid->ram_bytes = split_mid->len;
1100	split_mid->flags = flags;
1101	split_mid->generation = em->generation;
1102	add_extent_mapping(inode, split_mid, 1);
1103
1104	/* Once for us */
1105	free_extent_map(em);
1106	/* Once for the tree */
1107	free_extent_map(em);
1108
1109out_unlock:
1110	write_unlock(&em_tree->lock);
1111	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
1112	free_extent_map(split_mid);
1113out_free_pre:
1114	free_extent_map(split_pre);
1115	return ret;
1116}
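/*
 * Resulting layout for a hypothetical split_extent_map() call on a pinned,
 * uncompressed extent map (the demo_* name is an assumption, and @len must
 * be larger than the 4K split point used here):
 *
 *   before: [start ............................... start + len)
 *   after:  [start .. start + 4K) [start + 4K .... start + len)
 *
 * The first piece gets @new_logical as its disk_bytenr, the second keeps
 * the original block start shifted by the split size.
 */
#if 0
static int demo_split_first_4k(struct btrfs_inode *inode, u64 start, u64 len,
			       u64 new_logical)
{
	return split_extent_map(inode, start, len, SZ_4K, new_logical);
}
#endif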
1117
1118struct btrfs_em_shrink_ctx {
1119	long nr_to_scan;
1120	long scanned;
1121};
1122
1123static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
1124{
1125	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1126	const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info);
1127	struct extent_map_tree *tree = &inode->extent_tree;
1128	long nr_dropped = 0;
1129	struct rb_node *node;
1130
1131	lockdep_assert_held_write(&tree->lock);
1132
1133	/*
1134	 * Take the mmap lock so that we serialize with the inode logging phase
1135	 * of fsync because we may need to set the full sync flag on the inode,
1136	 * in case we have to remove extent maps in the tree's list of modified
1137	 * extents. If we set the full sync flag in the inode while an fsync is
1138	 * in progress, we may risk missing new extents because before the flag
1139	 * is set, fsync decides to only wait for writeback to complete and then
1140	 * during inode logging it sees the flag set and uses the subvolume tree
1141	 * to find new extents, which may not be there yet because ordered
1142	 * extents haven't completed yet.
1143	 *
1144	 * We also do a try lock because we don't want to block for too long and
1145	 * we are holding the extent map tree's lock in write mode.
1146	 */
1147	if (!down_read_trylock(&inode->i_mmap_lock))
1148		return 0;
1149
1150	node = rb_first(&tree->root);
1151	while (node) {
1152		struct rb_node *next = rb_next(node);
1153		struct extent_map *em;
1154
1155		em = rb_entry(node, struct extent_map, rb_node);
1156		ctx->scanned++;
1157
1158		if (em->flags & EXTENT_FLAG_PINNED)
1159			goto next;
1160
1161		/*
1162		 * If the extent map is in the list of modified extents (new) and its
1163		 * generation is the same (or is greater than) the current fs
1164		 * generation, it means it was not yet persisted so we have to
1165		 * set the full sync flag so that the next fsync will not miss
1166		 * it.
1167		 */
1168		if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
1169			btrfs_set_inode_full_sync(inode);
1170
1171		remove_extent_mapping(inode, em);
1172		trace_btrfs_extent_map_shrinker_remove_em(inode, em);
1173		/* Drop the reference for the tree. */
1174		free_extent_map(em);
1175		nr_dropped++;
1176next:
1177		if (ctx->scanned >= ctx->nr_to_scan)
1178			break;
1179
1180		/*
1181		 * Stop if we need to reschedule or there's contention on the
1182		 * lock. This is to avoid slowing other tasks trying to take the
1183		 * lock.
1184		 */
1185		if (need_resched() || rwlock_needbreak(&tree->lock) ||
1186		    btrfs_fs_closing(fs_info))
1187			break;
1188		node = next;
1189	}
1190	up_read(&inode->i_mmap_lock);
1191
1192	return nr_dropped;
1193}
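/*
 * The persistence check from the scan above, isolated as a sketch (the
 * demo_* name is hypothetical): an extent map still on the modified list
 * whose generation is not older than the current fs generation was not
 * persisted yet, so removing it must force a full fsync.
 */
#if 0
static bool demo_must_force_full_sync(const struct extent_map *em,
				      u64 cur_fs_gen)
{
	return !list_empty(&em->list) && em->generation >= cur_fs_gen;
}
#endif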
1194
1195static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
1196						      u64 min_ino)
1197{
1198	struct btrfs_inode *inode;
1199	unsigned long from = min_ino;
1200
1201	xa_lock(&root->inodes);
1202	while (true) {
1203		struct extent_map_tree *tree;
1204
1205		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
1206		if (!inode)
1207			break;
1208
1209		tree = &inode->extent_tree;
1210
1211		/*
1212		 * We want to be fast so if the lock is busy we don't want to
1213		 * spend time waiting for it (some task is about to do IO for
1214		 * the inode).
1215		 */
1216		if (!write_trylock(&tree->lock))
1217			goto next;
1218
1219		/*
1220		 * Skip inode if it doesn't have loaded extent maps, so we avoid
1221		 * getting a reference and doing an iput later. This includes
1222		 * cases like files that were opened for things like stat(2), or
1223		 * files with all extent maps previously released through the
1224		 * release folio callback (btrfs_release_folio()) or released in
1225		 * a previous run, or directories which never have extent maps.
1226		 */
1227		if (RB_EMPTY_ROOT(&tree->root)) {
1228			write_unlock(&tree->lock);
1229			goto next;
1230		}
1231
1232		if (igrab(&inode->vfs_inode))
1233			break;
1234
1235		write_unlock(&tree->lock);
1236next:
1237		from = btrfs_ino(inode) + 1;
1238		cond_resched_lock(&root->inodes.xa_lock);
1239	}
1240	xa_unlock(&root->inodes);
1241
1242	return inode;
1243}
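/*
 * Minimal sketch of the xarray cursor walk used above, on a hypothetical
 * xarray of demo_entry objects (all demo_* names are assumptions):
 * xa_find() stores the found index back into @from, so advancing @from by
 * one past it yields a simple resumable iterator that tolerates dropping
 * the lock in cond_resched_lock().
 */
#if 0
struct demo_entry;

static bool demo_entry_tryget(struct demo_entry *entry);	/* Hypothetical. */

static struct demo_entry *demo_find_from(struct xarray *xa, unsigned long min)
{
	struct demo_entry *entry;
	unsigned long from = min;

	xa_lock(xa);
	while ((entry = xa_find(xa, &from, ULONG_MAX, XA_PRESENT))) {
		if (demo_entry_tryget(entry))
			break;
		from++;
		cond_resched_lock(&xa->xa_lock);
	}
	xa_unlock(xa);

	return entry;
}
#endif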
1244
1245static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
1246{
1247	struct btrfs_fs_info *fs_info = root->fs_info;
1248	struct btrfs_inode *inode;
1249	long nr_dropped = 0;
1250	u64 min_ino = fs_info->em_shrinker_last_ino + 1;
1251
1252	inode = find_first_inode_to_shrink(root, min_ino);
1253	while (inode) {
1254		nr_dropped += btrfs_scan_inode(inode, ctx);
1255		write_unlock(&inode->extent_tree.lock);
1256
1257		min_ino = btrfs_ino(inode) + 1;
1258		fs_info->em_shrinker_last_ino = btrfs_ino(inode);
1259		iput(&inode->vfs_inode);
1260
1261		if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
1262			break;
1263
1264		cond_resched();
1265
1266		inode = find_first_inode_to_shrink(root, min_ino);
1267	}
1268
1269	if (inode) {
1270		/*
1271		 * There are still inodes in this root or we happened to process
1272		 * the last one and reached the scan limit. In either case set
1273		 * the current root to this one, so we'll resume from the next
1274		 * inode if there is one or we will find out this was the last
1275		 * one and move to the next root.
1276		 */
1277		fs_info->em_shrinker_last_root = btrfs_root_id(root);
1278	} else {
1279		/*
1280		 * No more inodes in this root, set em_shrinker_last_ino to 0 so
1281		 * that when processing the next root we start from its first inode.
1282		 */
1283		fs_info->em_shrinker_last_ino = 0;
1284		fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1;
1285	}
1286
1287	return nr_dropped;
1288}
1289
1290static void btrfs_extent_map_shrinker_worker(struct work_struct *work)
1291{
1292	struct btrfs_fs_info *fs_info;
1293	struct btrfs_em_shrink_ctx ctx;
1294	u64 start_root_id;
1295	u64 next_root_id;
1296	bool cycled = false;
1297	long nr_dropped = 0;
1298
1299	fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work);
1300
1301	ctx.scanned = 0;
1302	ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan);
1303
1304	start_root_id = fs_info->em_shrinker_last_root;
1305	next_root_id = fs_info->em_shrinker_last_root;
1306
1307	if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
1308		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
1309
1310		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr);
1311	}
1312
1313	while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) {
1314		struct btrfs_root *root;
1315		unsigned long count;
1316
1317		cond_resched();
1318
1319		spin_lock(&fs_info->fs_roots_radix_lock);
1320		count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1321					       (void **)&root,
1322					       (unsigned long)next_root_id, 1);
1323		if (count == 0) {
1324			spin_unlock(&fs_info->fs_roots_radix_lock);
1325			if (start_root_id > 0 && !cycled) {
1326				next_root_id = 0;
1327				fs_info->em_shrinker_last_root = 0;
1328				fs_info->em_shrinker_last_ino = 0;
1329				cycled = true;
1330				continue;
1331			}
1332			break;
1333		}
1334		next_root_id = btrfs_root_id(root) + 1;
1335		root = btrfs_grab_root(root);
1336		spin_unlock(&fs_info->fs_roots_radix_lock);
1337
1338		if (!root)
1339			continue;
1340
1341		if (is_fstree(btrfs_root_id(root)))
1342			nr_dropped += btrfs_scan_root(root, &ctx);
1343
1344		btrfs_put_root(root);
1345	}
1346
1347	if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
1348		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
1349
1350		trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
1351	}
1352
1353	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
1354}
1355
1356void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
1357{
1358	/*
1359	 * Do nothing if the shrinker is already running. In case of high memory
1360	 * pressure we can have a lot of tasks calling us and all passing the
1361	 * same nr_to_scan value, but in reality we may need only to free
1362	 * nr_to_scan extent maps (or less). In case we need to free more than
1363	 * that, we will be called again by the fs shrinker, so no worries about
1364	 * not doing enough work to reclaim memory from extent maps.
1365	 * We can also be repeatedly called with the same nr_to_scan value
1366	 * simply because the shrinker runs asynchronously and multiple calls
1367	 * to this function are made before the shrinker does enough progress.
1368	 *
1369	 * That's why we set the atomic counter to nr_to_scan only if its
1370	 * current value is zero, instead of incrementing the counter by
1371	 * nr_to_scan.
1372	 */
1373	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
1374		return;
1375
1376	queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
1377}
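/*
 * The "queue only when idle" gate above, reduced to a sketch with
 * hypothetical demo_* names: atomic64_cmpxchg() succeeds for exactly one
 * caller while the counter is zero, so a burst of concurrent reclaim
 * requests collapses into a single queued work item.
 */
#if 0
static atomic64_t demo_pending = ATOMIC64_INIT(0);

static void demo_kick_worker(struct work_struct *work, long budget)
{
	/* A previous request is still pending; let it do the work. */
	if (atomic64_cmpxchg(&demo_pending, 0, budget) != 0)
		return;

	queue_work(system_unbound_wq, work);
}
#endif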
1378
1379void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)
1380{
1381	atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0);
1382	INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker);
1383}