// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0,
					     SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree. Should be called for each new inode or
 * other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
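
/*
 * Illustrative usage sketch (not part of this file's API): each btrfs inode
 * embeds an extent_map_tree that is initialized exactly once, e.g.
 *
 *	extent_map_tree_init(&inode->extent_tree);
 *
 * after which all lookups and insertions serialize on tree->lock.
 */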

/*
 * Allocate a new extent_map structure. The new structure is returned with a
 * reference count of one and needs to be freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->compress_type = BTRFS_COMPRESS_NONE;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
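
/*
 * Minimal lifecycle sketch (illustrative only; assumes the map is never
 * inserted into a tree, so the single reference from allocation is ours):
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = SZ_4K;
 *	...
 *	free_extent_map(em);	// refcount hits zero, em is freed
 */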

/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}

/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
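
/*
 * For example (illustrative): range_end(10, 100) is 110, while
 * range_end(U64_MAX - 10, 100) would wrap around, so the exclusive end is
 * clamped to (u64)-1, the largest representable offset.
 */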

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
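
/*
 * Worked example for the overlap checks above (illustrative): inserting
 * [10, 30) into a tree that holds [20, 40) descends left of [20, 40) and
 * finds an empty slot, but the range check then sees that our end (30) is
 * past entry->start (20) while our start (10) is below extent_map_end(entry)
 * (40), so tree_insert() returns -EEXIST instead of linking an overlapping
 * node.
 */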

/*
 * Search through the tree for an extent_map with a given offset. If it can't
 * be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * Previous extent map found, return as in this case the caller does not
	 * care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * Don't merge compressed extents, we need to know their actual size.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}
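
/*
 * Illustrative example: two plain (uncompressed, unpinned, unlogged) maps
 * A = [0, 4K) with block_start 1M and B = [4K, 8K) with block_start 1M + 4K
 * satisfy both extent_map_end(A) == B->start and
 * B->block_start == extent_map_block_end(A), so mergable_maps(A, B)
 * returns 1 and the pair can collapse into a single [0, 8K) map.
 */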

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			set_bit(EXTENT_FLAG_MERGED, &em->flags);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		set_bit(EXTENT_FLAG_MERGED, &em->flags);
		free_extent_map(merge);
	}
}

/*
 * Unpin an extent from the cache.
 *
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
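
/*
 * Caller-side sketch (illustrative; this is typically done when an ordered
 * extent completes and the data is safely on disk):
 *
 *	unpin_extent_cache(&inode->extent_tree, ordered->file_offset,
 *			   ordered->num_bytes, trans->transid);
 *
 * Stamping the map with the transaction generation is what lets a later
 * fsync() decide whether this extent still needs to be logged.
 */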

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_io_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   NULL, GFP_NOWAIT, NULL);
	}
}

/*
 * Add new extent map to the extent tree.
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings. The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}
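
/*
 * Minimal insertion sketch (illustrative; the caller must hold the tree's
 * write lock, which the lockdep assertion above enforces):
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *	if (ret == -EEXIST)
 *		;	// an overlapping map was already present
 */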

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup extent_map that intersects the [@start, @start + @len) range.
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
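
/*
 * Typical read-side usage (sketch; callers take the lock shared and must
 * drop the extra reference the lookup grabbed):
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		// use em->start, em->len, em->block_start, ...
 *		free_extent_map(em);
 *	}
 */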

/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/*
 * Remove an extent_map from the extent tree.
 *
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Remove @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent(). Given an existing extent in the tree that is
 * the nearest extent to map_start, and a new extent to insert, deal with the
 * overlap and insert the best-fitting clipped extent into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
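
/*
 * Illustrative example: with an existing map covering [0, 8K) and a new em
 * covering [0, 16K) looked up at map_start = 8K, the helper above clips em
 * to [8K, 16K), advancing block_start by the same 8K for regular
 * uncompressed extents, so the inserted map no longer overlaps its neighbor.
 */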

/*
 * Add extent mapping into em_tree.
 *
 * @fs_info:	the filesystem
 * @em_tree:	extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->block_start == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped. It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:	The target inode.
 * @start:	Start offset of the range.
 * @end:	End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend before or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;
		bool compressed;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			start = em_end;
			if (end != (u64)-1)
				len = start + len - em_end;
			goto next;
		}

		flags = em->flags;
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't
		 * want it on the new extent maps.
		 */
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
							    em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = start + len;
			split->len = em_end - (start + len);
			split->block_start = em->block_start;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
							    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					const u64 diff = start + len - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since anyone who needs to
			 * access the subranges outside our range will just
			 * load it again from the subvolume tree's file extent
			 * item. However, if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:	The target inode.
 * @new_em:	The new extent map to add to the inode's extent map tree.
 * @modified:	Indicate if the new extent map should be added to the list of
 *		modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}
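
/*
 * Caller-side sketch (illustrative; file_offset, num_bytes and disk_bytenr
 * are hypothetical locals, and the io tree range is assumed to be locked as
 * required above):
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = file_offset;
 *	em->len = num_bytes;
 *	em->block_start = disk_bytenr;
 *	em->block_len = num_bytes;
 *	ret = btrfs_replace_extent_map_range(inode, em, true);
 *	free_extent_map(em);	// the tree now holds its own reference
 */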