// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0,
					     SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree. Should be called for each new inode or
 * other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/*
 * Allocate a new extent_map structure. The new structure is returned with a
 * reference count of one and needs to be freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/*
 * Drop the reference on @em by one and free the structure if the reference
 * count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}
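
/*
 * Typical lifecycle (a sketch; the em_tree, fs_info, start and len variables
 * stand in for a real caller's context): an extent map is allocated with one
 * reference, insertion into a tree takes an additional reference, and the
 * creator drops its own reference when done:
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = start;
 *	em->len = len;
 *	...
 *	write_lock(&em_tree->lock);
 *	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
 *	write_unlock(&em_tree->lock);
 *	free_extent_map(em);	/* the tree still holds its own reference */
 */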

/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
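
/*
 * Note on tree_insert(): the descent above only rejects ranges overlapping
 * nodes on the search path, so the two follow-up scans with rb_next() and
 * rb_prev() check the would-be neighbors for overlap with
 * [em->start, em->start + em->len) before the node is linked in.
 */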

/*
 * Search through the tree for an extent_map with a given offset. If it can't
 * be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * A previous extent map was found; return it, as in this case the
	 * caller does not care about the next one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}
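
/*
 * On an exact hit, __tree_search() returns the matching node and leaves
 * *prev_or_next_ret alone. On a miss it returns NULL and stores the closest
 * extent map that ends after @offset or, if there is none, the closest one
 * that starts before it, so callers can fall back to a nearby mapping.
 */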

static inline u64 extent_map_block_end(const struct extent_map *em)
{
	if (em->block_start + em->block_len < em->block_start)
		return (u64)-1;
	return em->block_start + em->block_len;
}

static bool can_merge_extent_map(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_PINNED)
		return false;

	/* Don't merge compressed extents; we need to know their actual size. */
	if (extent_map_is_compressed(em))
		return false;

	if (em->flags & EXTENT_FLAG_LOGGING)
		return false;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&em->list))
		return false;

	return true;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
	if (extent_map_end(prev) != next->start)
		return false;

	if (prev->flags != next->flags)
		return false;

	if (next->block_start < EXTENT_MAP_LAST_BYTE - 1)
		return next->block_start == extent_map_block_end(prev);

	/* HOLES and INLINE extents. */
	return next->block_start == prev->block_start;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (!can_merge_extent_map(em))
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			em->flags |= EXTENT_FLAG_MERGED;

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		em->flags |= EXTENT_FLAG_MERGED;
		free_extent_map(merge);
	}
}
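
/*
 * Merge example (sketch): a map for file range [0, 4K) with block start B
 * and a map for [4K, 8K) with block start B + 4K are contiguous both
 * logically and physically, so try_merge_map() collapses them into a single
 * map for [0, 8K) with block start B, provided neither map is pinned,
 * compressed, being logged or still on the modified extents list.
 */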

/*
 * Unpin an extent from the cache.
 *
 * @inode:	the inode from which we are unpinning an extent range
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	if (WARN_ON(!em)) {
		btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   start, len, gen);
		goto out;
	}

	if (WARN_ON(em->start != start))
		btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   em->start, start, len, gen);

	em->generation = gen;
	em->flags &= ~EXTENT_FLAG_PINNED;
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (em->flags & EXTENT_FLAG_FILLING) {
		prealloc = true;
		em->flags &= ~EXTENT_FLAG_FILLING;
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	em->flags &= ~EXTENT_FLAG_LOGGING;
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	ASSERT(list_empty(&em->list));

	if (modified)
		list_add(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

/*
 * Add new extent map to the extent tree
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings. The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
static int add_extent_mapping(struct extent_map_tree *tree,
			      struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
out:
	return ret;
}
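
/*
 * Usage sketch (the caller must already hold tree->lock for writing, as the
 * lockdep assertion above requires):
 *
 *	write_lock(&tree->lock);
 *	ret = add_extent_mapping(tree, em, 0);
 *	write_unlock(&tree->lock);
 *
 * On success the tree holds a reference to @em; on -EEXIST the caller keeps
 * sole ownership and has to resolve the overlap, as done in
 * btrfs_add_extent_mapping() below.
 */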

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup extent_map that intersects @start + @len range.
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
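
/*
 * Typical caller pattern (sketch; the tree must be locked at least for
 * reading, and the reference taken by the lookup must be dropped):
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... use em->block_start, em->len, ...
 *		free_extent_map(em);
 *	}
 */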

/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/*
 * Remove an extent_map from the extent tree.
 *
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Remove @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!(em->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);
}
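
/*
 * Since remove_extent_mapping() drops no references, callers that want to
 * free the removed map must follow it with free_extent_map() to release the
 * reference the tree was holding; see drop_all_extent_maps_fast() below for
 * an example of this pattern.
 */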

static void replace_extent_mapping(struct extent_map_tree *tree,
				   struct extent_map *cur,
				   struct extent_map *new,
				   int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
	ASSERT(extent_map_in_tree(cur));
	if (!(cur->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent(). Given an existing extent in the tree and an
 * extent to insert, where the existing extent is the nearest extent to
 * map_start, deal with the overlap and insert the best-fitting new extent
 * into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !extent_map_is_compressed(em)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
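
/*
 * Worked example (sketch): if @em covers [0, 16K) and the tree already holds
 * an extent map for [0, 4K), then prev is the existing map and the code
 * trims @em to start at 4K (the end of prev), advances block_start by the
 * same 4K, and inserts the trimmed map covering [4K, 16K).
 */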

/*
 * Add extent mapping into em_tree.
 *
 * @fs_info:	the filesystem
 * @em_tree:	extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->block_start == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree while
	 * we had the lock dropped. It is also possible that an overlapping map
	 * exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps it.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}
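
/*
 * For example (sketch): btrfs_get_extent() drops the tree lock while it
 * reads a file extent item from the subvolume tree. If another task inserts
 * a map for the same range during that window, the add above fails with
 * -EEXIST and case a) hands the racing task's map back through @em_in.
 */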

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:	 The target inode.
 * @start:	 Start offset of the range.
 * @end:	 End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range and extend behind or beyond it
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;
		bool compressed;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
			start = em_end;
			goto next;
		}

		flags = em->flags;
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
		 * it on the new extent maps.
		 */
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;
		compressed = extent_map_is_compressed(em);

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
							    em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = end;
			split->len = em_end - end;
			split->block_start = em->block_start;
			split->flags = flags;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
							    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					const u64 diff = start + len - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs it to
			 * access the subranges outside our range, it will just
			 * load it again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}
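
/*
 * Worked example (sketch): dropping the range [4K, 8K) from a single extent
 * map covering [0, 16K) leaves two maps in the tree: the first split keeps
 * [0, 4K) by replacing the original in place, the second split re-inserts
 * [8K, 16K) with block_start advanced by 8K (for a regular, non-compressed
 * extent), and the original map is then released.
 */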

/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:	The target inode.
 * @new_em:	The new extent map to add to the inode's extent map tree.
 * @modified:	Indicate if the new extent map should be added to the list of
 *		modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}
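
/*
 * Note that the drop-then-add sequence above is not atomic: the tree lock is
 * released between dropping the range and adding @new_em, so a racing task
 * may insert a partially overlapping map in that window, which is exactly
 * why an -EEXIST result retries the whole sequence.
 */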

/*
 * Split off the first pre bytes from the extent_map at [start, start + len),
 * and set the block_start for it to new_logical.
 *
 * This function is used when an ordered_extent needs to be split.
 */
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
		     u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;

	ASSERT(pre != 0);
	ASSERT(pre < len);

	split_pre = alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
	}

	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	ASSERT(em->len == len);
	ASSERT(!extent_map_is_compressed(em));
	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
	ASSERT(em->flags & EXTENT_FLAG_PINNED);
	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	em->flags &= ~EXTENT_FLAG_PINNED;

	/* First, replace the em with a new extent_map starting from em->start. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->orig_start = split_pre->start;
	split_pre->block_start = new_logical;
	split_pre->block_len = split_pre->len;
	split_pre->orig_block_len = split_pre->block_len;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->generation = em->generation;

	replace_extent_mapping(em_tree, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 * [em->start, em->start + pre)
	 */

	/* Insert the middle extent_map. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->orig_start = split_mid->start;
	split_mid->block_start = em->block_start + pre;
	split_mid->block_len = split_mid->len;
	split_mid->orig_block_len = split_mid->block_len;
	split_mid->ram_bytes = split_mid->len;
	split_mid->flags = flags;
	split_mid->generation = em->generation;
	add_extent_mapping(em_tree, split_mid, 1);

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
	free_extent_map(split_mid);
out_free_pre:
	free_extent_map(split_pre);
	return ret;
}
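
/*
 * Example (sketch): with start == 0, len == 16K, pre == 4K and new_logical
 * == N, the original pinned map for [0, 16K) is replaced by a map for
 * [0, 4K) pointing at block N plus a map for [4K, 16K) pointing at the
 * original block_start + 4K; both inherit the original flags and generation.
 */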