// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"
#include "delalloc-space.h"
#include "block-group.h"

/*
 * backref_node, mapping_node and tree_block all start with this header,
 * so the generic rb-tree helpers below can handle any of them through
 * the embedded rb_node.
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};
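
/*
 * The two slots in a backref_edge are indexed by LOWER/UPPER below:
 * node[LOWER] is the child block and node[UPPER] its parent, while
 * list[LOWER] threads the edge through the child's ->upper list and
 * list[UPPER] threads it through the parent's ->lower list.
 */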

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * maps address of tree root to the tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
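
/*
 * A cluster batches up to MAX_EXTENTS data extents so they can be
 * relocated together: [start, end] spans the whole run, nr counts the
 * extents, and boundary[] records where each extent starts so the run
 * can be split back into individual extents after the copy.
 */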

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
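
/*
 * Data relocation runs the two stages in order: MOVE_DATA_EXTENTS
 * copies the block group's data into newly allocated extents, then
 * UPDATE_DATA_PTRS rewrites the file extent items in the affected
 * trees to point at the new locations (see replace_file_extents(),
 * which only acts during the second stage).
 */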

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}
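
/*
 * Note: the cache allocations above and below use GFP_NOFS because
 * they happen in the middle of relocation, where recursing from memory
 * reclaim back into the filesystem could deadlock on the transaction
 * state and locks already held.
 */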

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
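
/*
 * Usage sketch for the two helpers above: tree_insert() returns NULL on
 * success and the conflicting rb_node when an entry with the same
 * bytenr already exists. Callers either treat a conflict as cache
 * corruption (backref_tree_panic()) or re-key the node first, as
 * update_backref_node() below does:
 *
 *	rb_erase(&node->rb_node, &cache->rb_root);
 *	node->bytenr = new_bytenr;
 *	rb_node = tree_insert(&cache->rb_root, node->bytenr,
 *			      &node->rb_node);
 *	if (rb_node)
 *		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 */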

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}

/*
 * walk up backref nodes until reaching the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
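
/*
 * The two walkers above are meant to be paired to enumerate every
 * root-reaching reference path of a block, roughly:
 *
 *	int index = 0;
 *	struct backref_node *next = node, *root_node;
 *
 *	while (next) {
 *		root_node = walk_up_backref(next, edges, &index);
 *		... edges[0..index) now records one complete path ...
 *		next = walk_down_backref(edges, &index);
 *	}
 *
 * which is the pattern relocation uses when consuming the cache.
 */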

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in the previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
	    root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
	if (item_size < sizeof(*ei)) {
		btrfs_print_v0_err(leaf->fs_info);
		btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
		return 1;
	}
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
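
/*
 * For reference, the extent item layout that find_inline_backref()
 * walks looks like this:
 *
 *	struct btrfs_extent_item	<- ei
 *	struct btrfs_tree_block_info	<- bi, EXTENT_ITEM_KEY only,
 *					   absent for METADATA_ITEM_KEY
 *	inline backrefs ...		<- *ptr starts here,
 *					   *end = ei + item size
 */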

/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1; /* For searching extent root */
	struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			int type;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			if (key.objectid == key.offset) {
				/*
				 * Only root blocks of reloc trees use a
				 * backref pointing to itself.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			err = -EINVAL;
			btrfs_print_v0_err(rc->extent_root->fs_info);
			btrfs_handle_fs_error(rc->extent_root->fs_info, err,
					      NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/* Search the tree to find parent blocks referring to the block. */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1,
				  root->root_key.objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;

		/* Add all nodes and edges in the path */
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs. we only do
				 * this once while walking up a tree as we
				 * will catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		free_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}

/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
				      root->node->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		if (!node)
			return;
		BUG_ON((struct btrfs_root *)node->data != root);
	}

	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}

static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have the RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}

/*
 * create reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with the special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	/*
	 * The subvolume has a reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
		return 0;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}

/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
	    !root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/* root->reloc_root will stay until the current relocation finishes */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}

/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
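
/*
 * find_next_inode() is what lets replace_file_extents() and
 * invalidate_extent_cache() below walk a subvolume's in-memory inodes
 * in ino order without holding root->inode_lock while working on each
 * inode: the igrab() reference keeps the inode alive after the lock is
 * dropped.
 */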

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}

/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
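
/*
 * Note for the lookup above: for the relocation data inode, index_cnt
 * holds the start bytenr of the block group being relocated, so
 * "bytenr - index_cnt" converts an extent's old disk bytenr into the
 * file offset at which the copied data lives in the relocation inode.
 */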

/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset, end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		ref.real_root = root->root_key.objectid;
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		ref.real_root = root->root_key.objectid;
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}

static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}

/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking_write(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking_write(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		struct btrfs_key first_key;

		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(parent, &first_key, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
					     level - 1, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking_write(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Inform qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		ref.skip_qgroup = true;
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
		ret = btrfs_inc_extent_ref(trans, &ref);
		BUG_ON(ret);
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		ref.skip_qgroup = true;
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
		ret = btrfs_inc_extent_ref(trans, &ref);
		BUG_ON(ret);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
		ref.skip_qgroup = true;
		ret = btrfs_free_extent(trans, &ref);
		BUG_ON(ret);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
		ref.skip_qgroup = true;
		ret = btrfs_free_extent(trans, &ref);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
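
/*
 * The four delayed ref updates at the end of replace_path() mirror the
 * pointer swap: each tree gains a reference to the block it now points
 * to and drops the reference to the block it gave away, keeping the
 * extent tree consistent with the swapped parent slots.
 */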
1972
1973/*
1974 * helper to find next relocated block in reloc tree
1975 */
1976static noinline_for_stack
1977int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1978 int *level)
1979{
1980 struct extent_buffer *eb;
1981 int i;
1982 u64 last_snapshot;
1983 u32 nritems;
1984
1985 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1986
1987 for (i = 0; i < *level; i++) {
1988 free_extent_buffer(path->nodes[i]);
1989 path->nodes[i] = NULL;
1990 }
1991
1992 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1993 eb = path->nodes[i];
1994 nritems = btrfs_header_nritems(eb);
1995 while (path->slots[i] + 1 < nritems) {
1996 path->slots[i]++;
1997 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1998 last_snapshot)
1999 continue;
2000
2001 *level = i;
2002 return 0;
2003 }
2004 free_extent_buffer(path->nodes[i]);
2005 path->nodes[i] = NULL;
2006 }
2007 return 1;
2008}
2009
2010/*
2011 * walk down reloc tree to find relocated block of lowest level
2012 */
2013static noinline_for_stack
2014int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
2015 int *level)
2016{
2017 struct btrfs_fs_info *fs_info = root->fs_info;
2018 struct extent_buffer *eb = NULL;
2019 int i;
2020 u64 bytenr;
2021 u64 ptr_gen = 0;
2022 u64 last_snapshot;
2023 u32 nritems;
2024
2025 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2026
2027 for (i = *level; i > 0; i--) {
2028 struct btrfs_key first_key;
2029
2030 eb = path->nodes[i];
2031 nritems = btrfs_header_nritems(eb);
2032 while (path->slots[i] < nritems) {
2033 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
2034 if (ptr_gen > last_snapshot)
2035 break;
2036 path->slots[i]++;
2037 }
2038 if (path->slots[i] >= nritems) {
2039 if (i == *level)
2040 break;
2041 *level = i + 1;
2042 return 0;
2043 }
2044 if (i == 1) {
2045 *level = i;
2046 return 0;
2047 }
2048
2049 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
2050 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
2051 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
2052 &first_key);
2053 if (IS_ERR(eb)) {
2054 return PTR_ERR(eb);
2055 } else if (!extent_buffer_uptodate(eb)) {
2056 free_extent_buffer(eb);
2057 return -EIO;
2058 }
2059 BUG_ON(btrfs_header_level(eb) != i - 1);
2060 path->nodes[i - 1] = eb;
2061 path->slots[i - 1] = 0;
2062 }
2063 return 1;
2064}
2065
/*
 * invalidate the extent cache for file extents whose key is in the
 * range of [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}

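/*
 * walk up the path from @level and find the key of the first node slot
 * that follows the current path; returns 0 and fills @key on success,
 * or 1 if the path is exhausted
 */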
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}

/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 */
static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	reloc_root_item->drop_level = 0;
	btrfs_set_root_refs(reloc_root_item, 0);
	btrfs_update_reloc_root(trans, root);

	if (list_empty(&root->reloc_dirty_list)) {
		btrfs_grab_fs_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}
}

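/*
 * drop the reloc trees of the subvolumes queued on
 * reloc_control::dirty_subvol_roots; returns the first error hit while
 * still processing the remaining entries
 */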
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, clean up its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			if (reloc_root) {
				ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
				if (ret2 < 0 && !ret)
					ret = ret2;
			}
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			btrfs_put_fs_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, NULL, 0, 1);
			if (ret2 < 0 && !ret)
				ret = ret2;
		}
	}
	return ret;
}

/*
 * merge the relocated tree blocks in a reloc tree with the
 * corresponding fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to
	 * be relocated and that block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0)
		insert_dirty_subvol(trans, rc, root);

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}

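/*
 * reserve space for the merge and set the root refs of all reloc trees
 * to 1 in a committed transaction, so btrfs_recover_relocation can
 * resume the merge after a crash
 */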
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(fs_info, reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}

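/*
 * detach the reloc roots on @list from the reloc control and release
 * their extent buffers
 */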
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
		free_extent_buffer(reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->node = NULL;
		reloc_root->commit_root = NULL;
	}
}

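/*
 * merge all reloc trees queued in reloc_control::reloc_roots back into
 * their fs trees; reloc trees with zero root refs are only queued on
 * the dirty list for cleanup
 */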
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction;
	 * we have to make sure nobody is in the middle of adding their
	 * roots to the list while we are doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}

static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;

	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}

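/*
 * record the fs root that owns @reloc_root in the current transaction
 */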
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);

	return btrfs_record_root_in_trans(trans, root);
}

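/*
 * walk up the backref path from @node and select the reloc root to
 * use, recording the visited roots in the transaction; also sets up
 * backref_cache::path for btrfs_reloc_cow_block
 */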
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* set up the backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}

/*
 * select a tree root for relocation. return NULL if the block is
 * reference counted; we should use do_relocation() in that case.
 * return a tree root pointer if the block isn't reference counted.
 * return -ENOENT if the block is the root of a reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}

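/*
 * calculate the metadata space needed to relocate @node and all the
 * unprocessed tree blocks above it
 */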
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}

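/*
 * reserve metadata space for relocating @node; on failure, grow the
 * reservation target and return -EAGAIN so the caller can restart with
 * a new transaction
 */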
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited
	 * flushing. If we get ENOSPC, just kick back -EAGAIN so we know
	 * to drop the transaction and try to refill when we can flush
	 * all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need to hold a lock to protect it.
		 * we expand the reservation size here to allow enough
		 * space for relocation and will return early in the
		 * ENOSPC case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}

/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_key first_key;
		struct btrfs_ref ref = { 0 };

		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				if (ret < 0) {
					err = ret;
					goto next;
				}
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
		eb = read_tree_block(fs_info, bytenr, generation,
				     upper->level - 1, &first_key);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking_write(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
					       node->eb->start, blocksize,
					       upper->eb->start);
			ref.real_root = root->root_key.objectid;
			btrfs_init_tree_ref(&ref, node->level,
					    btrfs_header_owner(upper->eb));
			ret = btrfs_inc_extent_ref(trans, &ref);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}

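/*
 * update pointers in the upper level blocks to point to the already
 * relocated @node
 */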
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}

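/*
 * link all pending backref nodes to their upper level blocks; returns
 * @err if it was already set, otherwise the first error encountered
 */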
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}

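/*
 * record a block range as processed in reloc_control::processed_blocks
 */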
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY);
}

static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}

/*
 * mark a block and all blocks that directly or indirectly reference it
 * as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}

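/*
 * check if the tree block at @bytenr has already been processed
 */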
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}

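/*
 * read a tree block to get its first key; the expected generation was
 * stashed in block->key.offset by add_tree_block()
 */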
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
			     block->level, NULL);
	if (IS_ERR(eb)) {
		return PTR_ERR(eb);
	} else if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}

/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}

/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}

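/*
 * preallocate file extents in the data relocation inode covering the
 * whole cluster, releasing the reserved data space for the gaps
 * between extents
 */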
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset;
	struct extent_changeset *data_reserved = NULL;

	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);

	ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
					  prealloc_end + 1 - prealloc_start);
	if (ret)
		goto out;

	cur_offset = prealloc_start;
	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		if (cur_offset < start)
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, start - cur_offset);
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, prealloc_end + 1 - cur_offset);
out:
	inode_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}

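/*
 * insert a pinned extent mapping for [start, end], dropping any cached
 * extents that overlap the range
 */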
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}

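/*
 * read the pages covering the cluster into the data relocation inode
 * and dirty them, marking the cluster boundaries with EXTENT_BOUNDARY
 * so writeback keeps the individual extents separate
 */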
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
						NULL);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       PAGE_SIZE);

			clear_extent_bits(&BTRFS_I(inode)->io_tree,
					  page_start, page_end,
					  EXTENT_LOCKED | EXTENT_BOUNDARY);
			goto out;
		}
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);

		index++;
		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(fs_info);
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}

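/*
 * add a data extent to the current cluster, flushing the cluster first
 * if it is full or the extent is not adjacent to it
 */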
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}

/*
 * helper to add a tree block to the list.
 * the major work is getting the generation and level of the block
 */
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				    struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
		btrfs_print_v0_err(eb->fs_info);
		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
		return -EINVAL;
	} else {
		BUG();
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;

	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);

	return 0;
}

/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (tree_block_processed(bytenr, rc))
		return 0;

	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
	     "tree block extent item (%llu) is not found in extent tree",
			  bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to check if the block uses full backrefs for the pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}

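/*
 * look up (when @inode is NULL) and truncate the free space cache
 * inode for @block_group
 */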
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}

/*
 * helper to add tree blocks for backrefs of type BTRFS_EXTENT_DATA_REF_KEY;
 * this function scans the fs tree to find blocks that reference the
 * data extent
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, let's just
	 * delete it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(fs_info, rc->block_group,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	root = read_fs_root(fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * the references in tree blocks that use full backrefs
	 * are not counted
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, rc)) {
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * helper to find all tree blocks that reference a given data extent
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr;
	unsigned long end;
	u32 blocksize = rc->extent_root->fs_info->nodesize;
	int ret = 0;
	int err = 0;

	eb = path->nodes[0];
	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
	ptr += sizeof(struct btrfs_extent_item);

	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		key.type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_DATA);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			ret = -EUCLEAN;
			btrfs_err(rc->extent_root->fs_info,
		     "extent %llu slot %d has an invalid inline ref type",
				  eb->start, path->slots[0]);
		}
		if (ret) {
			err = ret;
			goto out;
		}
		ptr += btrfs_extent_inline_ref_size(key.type);
	}
	WARN_ON(ptr > end);

	while (1) {
		cond_resched();
		eb = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ret > 0)
				break;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_key->objectid)
			break;

		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			btrfs_print_v0_err(eb->fs_info);
			btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
			ret = -EINVAL;
		} else {
			ret = 0;
		}
		if (ret) {
			err = ret;
			break;
		}
		path->slots[0]++;
	}
out:
	btrfs_release_path(path);
	if (err)
		free_block_list(blocks);
	return err;
}

/*
 * helper to find the next unprocessed extent
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->key.objectid + rc->block_group->key.offset;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}

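/*
 * publish the reloc_control in fs_info so the rest of the code knows
 * relocation is in progress; unset_reloc_control() below clears it
 */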
static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}

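/*
 * returns 1 if the DATA/TREE_BLOCK/FULL_BACKREF flag combination of an
 * extent is invalid
 */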
static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}

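/*
 * set up the block reserve and the reloc control, then commit the
 * running transaction before the relocation pass starts
 */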
static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * the extent tree is not a ref_cow tree and has no
		 * reloc_root to clean up, and callers are responsible
		 * for freeing the above block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans);
	return 0;
}

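/*
 * the main relocation loop: walk all extents in the block group,
 * relocating tree blocks and queueing data extents into clusters, then
 * merge the reloc trees back into the fs trees
 */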
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					rc->block_rsv, rc->block_rsv->size,
					BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
			err = -EINVAL;
			btrfs_print_v0_err(trans->fs_info);
			btrfs_abort_transaction(trans, err);
			break;
		} else {
			BUG();
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force an
				 * update of the backref cache when committing
				 * the transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	btrfs_commit_transaction(trans);
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}

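/*
 * insert a minimal inode item (zero link count) for the data
 * relocation inode
 */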
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

4216/*
4217 * helper to create an inode for data relocation.
4218 * the inode lives in the data relocation tree and its link count is 0
4219 */
4220static noinline_for_stack
4221struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4222 struct btrfs_block_group_cache *group)
4223{
4224 struct inode *inode = NULL;
4225 struct btrfs_trans_handle *trans;
4226 struct btrfs_root *root;
4227 struct btrfs_key key;
4228 u64 objectid;
4229 int err = 0;
4230
4231 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4232 if (IS_ERR(root))
4233 return ERR_CAST(root);
4234
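	/*
	 * Start a transaction with room for the handful of item insertions
	 * below (inode item, orphan item); 6 units appears to be a
	 * conservative upper bound here.
	 */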
4235 trans = btrfs_start_transaction(root, 6);
4236 if (IS_ERR(trans))
4237 return ERR_CAST(trans);
4238
4239 err = btrfs_find_free_objectid(root, &objectid);
4240 if (err)
4241 goto out;
4242
4243 err = __insert_orphan_inode(trans, root, objectid);
4244 BUG_ON(err);
4245
4246 key.objectid = objectid;
4247 key.type = BTRFS_INODE_ITEM_KEY;
4248 key.offset = 0;
4249 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4250 BUG_ON(IS_ERR(inode));
4251 BTRFS_I(inode)->index_cnt = group->key.objectid;
4252
4253 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4254out:
4255 btrfs_end_transaction(trans);
4256 btrfs_btree_balance_dirty(fs_info);
4257 if (err) {
4258 if (inode)
4259 iput(inode);
4260 inode = ERR_PTR(err);
4261 }
4262 return inode;
4263}
4264
4265static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4266{
4267 struct reloc_control *rc;
4268
4269 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4270 if (!rc)
4271 return NULL;
4272
4273 INIT_LIST_HEAD(&rc->reloc_roots);
4274 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
4275 backref_cache_init(&rc->backref_cache);
4276 mapping_tree_init(&rc->reloc_root_tree);
4277 extent_io_tree_init(fs_info, &rc->processed_blocks,
4278 IO_TREE_RELOC_BLOCKS, NULL);
4279 return rc;
4280}
4281
4282/*
4283 * Print the block group being relocated
4284 */
4285static void describe_relocation(struct btrfs_fs_info *fs_info,
4286 struct btrfs_block_group_cache *block_group)
4287{
4288 char buf[128] = {'\0'};
4289
4290 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
4291
4292 btrfs_info(fs_info,
4293 "relocating block group %llu flags %s",
4294 block_group->key.objectid, buf);
4295}
4296
4297/*
4298 * function to relocate all extents in a block group.
4299 */
4300int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
4301{
4302 struct btrfs_block_group_cache *bg;
4303 struct btrfs_root *extent_root = fs_info->extent_root;
4304 struct reloc_control *rc;
4305 struct inode *inode;
4306 struct btrfs_path *path;
4307 int ret;
4308 int rw = 0;
4309 int err = 0;
4310
4311 bg = btrfs_lookup_block_group(fs_info, group_start);
4312 if (!bg)
4313 return -ENOENT;
4314
4315 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4316 btrfs_put_block_group(bg);
4317 return -ETXTBSY;
4318 }
4319
4320 rc = alloc_reloc_control(fs_info);
4321 if (!rc) {
4322 btrfs_put_block_group(bg);
4323 return -ENOMEM;
4324 }
4325
4326 rc->extent_root = extent_root;
4327 rc->block_group = bg;
4328
4329 ret = btrfs_inc_block_group_ro(rc->block_group);
4330 if (ret) {
4331 err = ret;
4332 goto out;
4333 }
4334 rw = 1;
4335
4336 path = btrfs_alloc_path();
4337 if (!path) {
4338 err = -ENOMEM;
4339 goto out;
4340 }
4341
4342 inode = lookup_free_space_inode(rc->block_group, path);
4343 btrfs_free_path(path);
4344
4345 if (!IS_ERR(inode))
4346 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4347 else
4348 ret = PTR_ERR(inode);
4349
4350 if (ret && ret != -ENOENT) {
4351 err = ret;
4352 goto out;
4353 }
4354
4355 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4356 if (IS_ERR(rc->data_inode)) {
4357 err = PTR_ERR(rc->data_inode);
4358 rc->data_inode = NULL;
4359 goto out;
4360 }
4361
4362 describe_relocation(fs_info, rc->block_group);
4363
4364 btrfs_wait_block_group_reservations(rc->block_group);
4365 btrfs_wait_nocow_writers(rc->block_group);
4366 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4367 rc->block_group->key.objectid,
4368 rc->block_group->key.offset);
4369
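	/*
	 * Relocation runs in passes: MOVE_DATA_EXTENTS copies data into the
	 * data reloc inode, then UPDATE_DATA_PTRS rewrites the file extent
	 * items to point at the new locations. Loop until a pass finds no
	 * more extents.
	 */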
4370 while (1) {
4371 mutex_lock(&fs_info->cleaner_mutex);
4372 ret = relocate_block_group(rc);
4373 mutex_unlock(&fs_info->cleaner_mutex);
4374 if (ret < 0)
4375 err = ret;
4376
4377 /*
4378 * We may have gotten ENOSPC after we already dirtied some
4379 * extents. If writeout happens while we're relocating a
4380 * different block group we could end up hitting the
4381 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4382 * btrfs_reloc_cow_block. Make sure we write everything out
4383 * properly so we don't trip over this problem, and then break
4384 * out of the loop if we hit an error.
4385 */
4386 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4387 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4388 (u64)-1);
4389 if (ret)
4390 err = ret;
4391 invalidate_mapping_pages(rc->data_inode->i_mapping,
4392 0, -1);
4393 rc->stage = UPDATE_DATA_PTRS;
4394 }
4395
4396 if (err < 0)
4397 goto out;
4398
4399 if (rc->extents_found == 0)
4400 break;
4401
4402 btrfs_info(fs_info, "found %llu extents", rc->extents_found);
4403
4404 }
4405
4406 WARN_ON(rc->block_group->pinned > 0);
4407 WARN_ON(rc->block_group->reserved > 0);
4408 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
4409out:
4410 if (err && rw)
4411 btrfs_dec_block_group_ro(rc->block_group);
4412 iput(rc->data_inode);
4413 btrfs_put_block_group(rc->block_group);
4414 kfree(rc);
4415 return err;
4416}
4417
4418static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4419{
4420 struct btrfs_fs_info *fs_info = root->fs_info;
4421 struct btrfs_trans_handle *trans;
4422 int ret, err;
4423
4424 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4425 if (IS_ERR(trans))
4426 return PTR_ERR(trans);
4427
4428 memset(&root->root_item.drop_progress, 0,
4429 sizeof(root->root_item.drop_progress));
4430 root->root_item.drop_level = 0;
4431 btrfs_set_root_refs(&root->root_item, 0);
4432 ret = btrfs_update_root(trans, fs_info->tree_root,
4433 &root->root_key, &root->root_item);
4434
4435 err = btrfs_end_transaction(trans);
4436 if (err)
4437 return err;
4438 return ret;
4439}
4440
4441/*
4442 * recover relocation interrupted by system crash.
4443 *
4444 * this function resumes merging reloc trees with their corresponding fs trees.
4445 * this is important for preserving the sharing of tree blocks
4446 */
4447int btrfs_recover_relocation(struct btrfs_root *root)
4448{
4449 struct btrfs_fs_info *fs_info = root->fs_info;
4450 LIST_HEAD(reloc_roots);
4451 struct btrfs_key key;
4452 struct btrfs_root *fs_root;
4453 struct btrfs_root *reloc_root;
4454 struct btrfs_path *path;
4455 struct extent_buffer *leaf;
4456 struct reloc_control *rc = NULL;
4457 struct btrfs_trans_handle *trans;
4458 int ret;
4459 int err = 0;
4460
4461 path = btrfs_alloc_path();
4462 if (!path)
4463 return -ENOMEM;
4464 path->reada = READA_BACK;
4465
4466 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4467 key.type = BTRFS_ROOT_ITEM_KEY;
4468 key.offset = (u64)-1;
4469
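	/*
	 * Reloc tree root items live in the tree root, keyed by
	 * (BTRFS_TREE_RELOC_OBJECTID, ROOT_ITEM, objectid of the fs tree);
	 * walk them from the highest offset downwards.
	 */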
4470 while (1) {
4471 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4472 path, 0, 0);
4473 if (ret < 0) {
4474 err = ret;
4475 goto out;
4476 }
4477 if (ret > 0) {
4478 if (path->slots[0] == 0)
4479 break;
4480 path->slots[0]--;
4481 }
4482 leaf = path->nodes[0];
4483 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4484 btrfs_release_path(path);
4485
4486 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4487 key.type != BTRFS_ROOT_ITEM_KEY)
4488 break;
4489
4490 reloc_root = btrfs_read_fs_root(root, &key);
4491 if (IS_ERR(reloc_root)) {
4492 err = PTR_ERR(reloc_root);
4493 goto out;
4494 }
4495
4496 list_add(&reloc_root->root_list, &reloc_roots);
4497
4498 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4499 fs_root = read_fs_root(fs_info,
4500 reloc_root->root_key.offset);
4501 if (IS_ERR(fs_root)) {
4502 ret = PTR_ERR(fs_root);
4503 if (ret != -ENOENT) {
4504 err = ret;
4505 goto out;
4506 }
4507 ret = mark_garbage_root(reloc_root);
4508 if (ret < 0) {
4509 err = ret;
4510 goto out;
4511 }
4512 }
4513 }
4514
4515 if (key.offset == 0)
4516 break;
4517
4518 key.offset--;
4519 }
4520 btrfs_release_path(path);
4521
4522 if (list_empty(&reloc_roots))
4523 goto out;
4524
4525 rc = alloc_reloc_control(fs_info);
4526 if (!rc) {
4527 err = -ENOMEM;
4528 goto out;
4529 }
4530
4531 rc->extent_root = fs_info->extent_root;
4532
4533 set_reloc_control(rc);
4534
4535 trans = btrfs_join_transaction(rc->extent_root);
4536 if (IS_ERR(trans)) {
4537 unset_reloc_control(rc);
4538 err = PTR_ERR(trans);
4539 goto out_free;
4540 }
4541
4542 rc->merge_reloc_tree = 1;
4543
4544 while (!list_empty(&reloc_roots)) {
4545 reloc_root = list_entry(reloc_roots.next,
4546 struct btrfs_root, root_list);
4547 list_del(&reloc_root->root_list);
4548
4549 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4550 list_add_tail(&reloc_root->root_list,
4551 &rc->reloc_roots);
4552 continue;
4553 }
4554
4555 fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
4556 if (IS_ERR(fs_root)) {
4557 err = PTR_ERR(fs_root);
4558 goto out_free;
4559 }
4560
4561 err = __add_reloc_root(reloc_root);
4562 BUG_ON(err < 0); /* -ENOMEM or logic error */
4563 fs_root->reloc_root = reloc_root;
4564 }
4565
4566 err = btrfs_commit_transaction(trans);
4567 if (err)
4568 goto out_free;
4569
4570 merge_reloc_roots(rc);
4571
4572 unset_reloc_control(rc);
4573
4574 trans = btrfs_join_transaction(rc->extent_root);
4575 if (IS_ERR(trans)) {
4576 err = PTR_ERR(trans);
4577 goto out_free;
4578 }
4579 err = btrfs_commit_transaction(trans);
4580
4581 ret = clean_dirty_subvols(rc);
4582 if (ret < 0 && !err)
4583 err = ret;
4584out_free:
4585 kfree(rc);
4586out:
4587 if (!list_empty(&reloc_roots))
4588 free_reloc_roots(&reloc_roots);
4589
4590 btrfs_free_path(path);
4591
4592 if (err == 0) {
4593 /* cleanup orphan inode in data relocation tree */
4594 fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4595 if (IS_ERR(fs_root))
4596 err = PTR_ERR(fs_root);
4597 else
4598 err = btrfs_orphan_cleanup(fs_root);
4599 }
4600 return err;
4601}
4602
4603/*
4604 * helper to add ordered checksum for data relocation.
4605 *
4606 * cloning checksums properly handles the nodatasum extents.
4607 * it also saves the CPU time of recalculating the checksums.
4608 */
4609int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4610{
4611 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4612 struct btrfs_ordered_sum *sums;
4613 struct btrfs_ordered_extent *ordered;
4614 int ret;
4615 u64 disk_bytenr;
4616 u64 new_bytenr;
4617 LIST_HEAD(list);
4618
4619 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4620 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
4621
4622 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
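	/*
	 * The data reloc inode maps file offset X to the extent's old disk
	 * bytenr: index_cnt caches the block group's start, so the old
	 * disk_bytenr equals file_pos + index_cnt.
	 */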
4623 ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
4624 disk_bytenr + len - 1, &list, 0);
4625 if (ret)
4626 goto out;
4627
4628 while (!list_empty(&list)) {
4629 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4630 list_del_init(&sums->list);
4631
4632 /*
4633 * We need to offset the new_bytenr based on where the csum is.
4634 * We need to do this because we will read in entire prealloc
4635 * extents but we may have written to say the middle of the
4636 * prealloc extent, so we need to make sure the csum goes with
4637 * the right disk offset.
4638 *
4639 * We can do this because the data reloc inode refers strictly
4640 * to the on disk bytes, so we don't have to worry about
4641 * disk_len vs real len like with real inodes since it's all
4642 * disk length.
4643 */
4644 new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
4645 sums->bytenr = new_bytenr;
4646
4647 btrfs_add_ordered_sum(ordered, sums);
4648 }
4649out:
4650 btrfs_put_ordered_extent(ordered);
4651 return ret;
4652}
4653
4654int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4655 struct btrfs_root *root, struct extent_buffer *buf,
4656 struct extent_buffer *cow)
4657{
4658 struct btrfs_fs_info *fs_info = root->fs_info;
4659 struct reloc_control *rc;
4660 struct backref_node *node;
4661 int first_cow = 0;
4662 int level;
4663 int ret = 0;
4664
4665 rc = fs_info->reloc_ctl;
4666 if (!rc)
4667 return 0;
4668
4669 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4670 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4671
4672 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
4673 if (buf == root->node)
4674 __update_reloc_root(root, cow->start);
4675 }
4676
4677 level = btrfs_header_level(buf);
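	/*
	 * A generation no newer than the last snapshot means this is the
	 * first COW of the block since the reloc tree was created.
	 */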
4678 if (btrfs_header_generation(buf) <=
4679 btrfs_root_last_snapshot(&root->root_item))
4680 first_cow = 1;
4681
4682 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4683 rc->create_reloc_tree) {
4684 WARN_ON(!first_cow && level == 0);
4685
4686 node = rc->backref_cache.path[level];
4687 BUG_ON(node->bytenr != buf->start &&
4688 node->new_bytenr != buf->start);
4689
4690 drop_node_buffer(node);
4691 extent_buffer_get(cow);
4692 node->eb = cow;
4693 node->new_bytenr = cow->start;
4694
4695 if (!node->pending) {
4696 list_move_tail(&node->list,
4697 &rc->backref_cache.pending[level]);
4698 node->pending = 1;
4699 }
4700
4701 if (first_cow)
4702 __mark_block_processed(rc, node);
4703
4704 if (first_cow && level > 0)
4705 rc->nodes_relocated += buf->len;
4706 }
4707
4708 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4709 ret = replace_file_extents(trans, rc, root, cow);
4710 return ret;
4711}
4712
4713/*
4714 * called before creating a snapshot. it calculates the metadata reservation
4715 * required for relocating tree blocks in the snapshot
4716 */
4717void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4718 u64 *bytes_to_reserve)
4719{
4720 struct btrfs_root *root = pending->root;
4721 struct reloc_control *rc = root->fs_info->reloc_ctl;
4722
4723 if (!root->reloc_root || !rc)
4724 return;
4725
4726 if (!rc->merge_reloc_tree)
4727 return;
4728
4729 root = root->reloc_root;
4730 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4731 /*
4732 * relocation is in the stage of merging trees. the space
4733 * used by merging a reloc tree is twice the size of
4734 * relocated tree nodes in the worst case. half for cowing
4735 * the reloc tree, half for cowing the fs tree. the space
4736 * used by cowing the reloc tree will be freed after the
4737	 * tree is dropped. if we create a snapshot, cowing the fs
4738	 * tree may use more space than it frees, so we need to
4739	 * reserve extra space.
4740 */
4741 *bytes_to_reserve += rc->nodes_relocated;
4742}
4743
4744/*
4745 * called after snapshot is created. migrate block reservation
4746 * and create reloc root for the newly created snapshot
4747 */
4748int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4749 struct btrfs_pending_snapshot *pending)
4750{
4751 struct btrfs_root *root = pending->root;
4752 struct btrfs_root *reloc_root;
4753 struct btrfs_root *new_root;
4754 struct reloc_control *rc = root->fs_info->reloc_ctl;
4755 int ret;
4756
4757 if (!root->reloc_root || !rc)
4758 return 0;
4759
4760 rc = root->fs_info->reloc_ctl;
4761 rc->merging_rsv_size += rc->nodes_relocated;
4762
4763 if (rc->merge_reloc_tree) {
4764 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4765 rc->block_rsv,
4766 rc->nodes_relocated, true);
4767 if (ret)
4768 return ret;
4769 }
4770
4771 new_root = pending->snap;
4772 reloc_root = create_reloc_root(trans, root->reloc_root,
4773 new_root->root_key.objectid);
4774 if (IS_ERR(reloc_root))
4775 return PTR_ERR(reloc_root);
4776
4777 ret = __add_reloc_root(reloc_root);
4778 BUG_ON(ret < 0);
4779 new_root->reloc_root = reloc_root;
4780
4781 if (rc->create_reloc_tree)
4782 ret = clone_backref_node(trans, rc, root, reloc_root);
4783 return ret;
4784}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/pagemap.h>
8#include <linux/writeback.h>
9#include <linux/blkdev.h>
10#include <linux/rbtree.h>
11#include <linux/slab.h>
12#include <linux/error-injection.h>
13#include "ctree.h"
14#include "disk-io.h"
15#include "transaction.h"
16#include "volumes.h"
17#include "locking.h"
18#include "btrfs_inode.h"
19#include "async-thread.h"
20#include "free-space-cache.h"
21#include "qgroup.h"
22#include "print-tree.h"
23#include "delalloc-space.h"
24#include "block-group.h"
25#include "backref.h"
26#include "misc.h"
27#include "subpage.h"
28#include "zoned.h"
29#include "inode-item.h"
30#include "space-info.h"
31#include "fs.h"
32#include "accessors.h"
33#include "extent-tree.h"
34#include "root-tree.h"
35#include "file-item.h"
36#include "relocation.h"
37#include "super.h"
38#include "tree-checker.h"
39
40/*
41 * Relocation overview
42 *
43 * [What does relocation do]
44 *
45 * The objective of relocation is to relocate all extents of the target block
46 * group to other block groups.
47 * This is used by resize (shrink only), profile conversion, compacting
48 * space, and the balance routine to spread chunks over devices.
49 *
50 * Before | After
51 * ------------------------------------------------------------------
52 * BG A: 10 data extents | BG A: deleted
53 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
54 * BG C: 1 extent | BG C: 3 data extents (1 old + 2 relocated)
55 *
56 * [How does relocation work]
57 *
58 * 1. Mark the target block group read-only
59 * New extents won't be allocated from the target block group.
60 *
61 * 2.1 Record each extent in the target block group
62 * To build a proper map of extents to be relocated.
63 *
64 * 2.2 Build data reloc tree and reloc trees
65 * Data reloc tree will contain an inode, recording all newly relocated
66 * data extents.
67 * There will be only one data reloc tree for one data block group.
68 *
69 * Reloc tree will be a special snapshot of its source tree, containing
70 * relocated tree blocks.
71 * Each tree referring to a tree block in the target block group will get its
72 * reloc tree built.
73 *
74 * 2.3 Swap source tree with its corresponding reloc tree
75 * Each involved tree only refers to new extents after swap.
76 *
77 * 3. Cleanup reloc trees and data reloc tree.
78 * As old extents in the target block group are still referenced by reloc
79 * trees, we need to clean them up before really freeing the target block
80 * group.
81 *
82 * The main complexity is in steps 2.2 and 2.3.
83 *
84 * The entry point of relocation is relocate_block_group() function.
85 */
86
87#define RELOCATION_RESERVED_NODES 256
88/*
89 * map address of tree root to tree
90 */
91struct mapping_node {
92 struct {
93 struct rb_node rb_node;
94 u64 bytenr;
95 }; /* Use rb_simple_node for search/insert */
96 void *data;
97};
98
99struct mapping_tree {
100 struct rb_root rb_root;
101 spinlock_t lock;
102};
103
104/*
105 * present a tree block to process
106 */
107struct tree_block {
108 struct {
109 struct rb_node rb_node;
110 u64 bytenr;
111 }; /* Use rb_simple_node for search/insert */
112 u64 owner;
113 struct btrfs_key key;
114 u8 level;
115 bool key_ready;
116};
117
118#define MAX_EXTENTS 128
119
120struct file_extent_cluster {
121 u64 start;
122 u64 end;
123 u64 boundary[MAX_EXTENTS];
124 unsigned int nr;
125 u64 owning_root;
126};
127
128/* Stages of data relocation. */
129enum reloc_stage {
130 MOVE_DATA_EXTENTS,
131 UPDATE_DATA_PTRS
132};
133
134struct reloc_control {
135 /* block group to relocate */
136 struct btrfs_block_group *block_group;
137 /* extent tree */
138 struct btrfs_root *extent_root;
139 /* inode for moving data */
140 struct inode *data_inode;
141
142 struct btrfs_block_rsv *block_rsv;
143
144 struct btrfs_backref_cache backref_cache;
145
146 struct file_extent_cluster cluster;
147 /* tree blocks have been processed */
148 struct extent_io_tree processed_blocks;
149 /* map start of tree root to corresponding reloc tree */
150 struct mapping_tree reloc_root_tree;
151 /* list of reloc trees */
152 struct list_head reloc_roots;
153 /* list of subvolume trees that get relocated */
154 struct list_head dirty_subvol_roots;
155 /* size of metadata reservation for merging reloc trees */
156 u64 merging_rsv_size;
157 /* size of relocated tree nodes */
158 u64 nodes_relocated;
159 /* reserved size for block group relocation */
160 u64 reserved_bytes;
161
162 u64 search_start;
163 u64 extents_found;
164
165 enum reloc_stage stage;
166 bool create_reloc_tree;
167 bool merge_reloc_tree;
168 bool found_file_extent;
169};
170
171static void mark_block_processed(struct reloc_control *rc,
172 struct btrfs_backref_node *node)
173{
174 u32 blocksize;
175
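	/*
	 * Leaves are always recorded as processed; higher-level nodes only
	 * when they sit inside the block group being relocated.
	 */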
176 if (node->level == 0 ||
177 in_range(node->bytenr, rc->block_group->start,
178 rc->block_group->length)) {
179 blocksize = rc->extent_root->fs_info->nodesize;
180 set_extent_bit(&rc->processed_blocks, node->bytenr,
181 node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
182 }
183 node->processed = 1;
184}
185
186/*
187 * walk up backref nodes until reaching a node that presents a tree root
188 */
189static struct btrfs_backref_node *walk_up_backref(
190 struct btrfs_backref_node *node,
191 struct btrfs_backref_edge *edges[], int *index)
192{
193 struct btrfs_backref_edge *edge;
194 int idx = *index;
195
196 while (!list_empty(&node->upper)) {
197 edge = list_entry(node->upper.next,
198 struct btrfs_backref_edge, list[LOWER]);
199 edges[idx++] = edge;
200 node = edge->node[UPPER];
201 }
202 BUG_ON(node->detached);
203 *index = idx;
204 return node;
205}
206
207/*
208 * walk down backref nodes to find start of next reference path
209 */
210static struct btrfs_backref_node *walk_down_backref(
211 struct btrfs_backref_edge *edges[], int *index)
212{
213 struct btrfs_backref_edge *edge;
214 struct btrfs_backref_node *lower;
215 int idx = *index;
216
217 while (idx > 0) {
218 edge = edges[idx - 1];
219 lower = edge->node[LOWER];
220 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
221 idx--;
222 continue;
223 }
224 edge = list_entry(edge->list[LOWER].next,
225 struct btrfs_backref_edge, list[LOWER]);
226 edges[idx - 1] = edge;
227 *index = idx;
228 return edge->node[UPPER];
229 }
230 *index = 0;
231 return NULL;
232}
233
234static void update_backref_node(struct btrfs_backref_cache *cache,
235 struct btrfs_backref_node *node, u64 bytenr)
236{
237 struct rb_node *rb_node;
238 rb_erase(&node->rb_node, &cache->rb_root);
239 node->bytenr = bytenr;
240 rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
241 if (rb_node)
242 btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
243}
244
245/*
246 * update backref cache after a transaction commit; returns 1 if the stale
247 * cache was flushed, 0 if it is still current
247 */
248static int update_backref_cache(struct btrfs_trans_handle *trans,
249 struct btrfs_backref_cache *cache)
250{
251 struct btrfs_backref_node *node;
252 int level = 0;
253
254 if (cache->last_trans == 0) {
255 cache->last_trans = trans->transid;
256 return 0;
257 }
258
259 if (cache->last_trans == trans->transid)
260 return 0;
261
262 /*
263 * detached nodes are used to avoid unnecessary backref
264	 * lookups. a transaction commit changes the extent tree,
265	 * so the detached nodes are no longer useful.
266 */
267 while (!list_empty(&cache->detached)) {
268 node = list_entry(cache->detached.next,
269 struct btrfs_backref_node, list);
270 btrfs_backref_cleanup_node(cache, node);
271 }
272
273 while (!list_empty(&cache->changed)) {
274 node = list_entry(cache->changed.next,
275 struct btrfs_backref_node, list);
276 list_del_init(&node->list);
277 BUG_ON(node->pending);
278 update_backref_node(cache, node, node->new_bytenr);
279 }
280
281 /*
282 * some nodes can be left in the pending list if there were
283	 * errors while processing the pending nodes.
284 */
285 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
286 list_for_each_entry(node, &cache->pending[level], list) {
287 BUG_ON(!node->pending);
288 if (node->bytenr == node->new_bytenr)
289 continue;
290 update_backref_node(cache, node, node->new_bytenr);
291 }
292 }
293
294 cache->last_trans = 0;
295 return 1;
296}
297
298static bool reloc_root_is_dead(const struct btrfs_root *root)
299{
300 /*
301 * Pair with set_bit/clear_bit in clean_dirty_subvols and
302 * btrfs_update_reloc_root. We need to see the updated bit before
303 * trying to access reloc_root
304 */
305 smp_rmb();
306 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
307 return true;
308 return false;
309}
310
311/*
312 * Check if this subvolume tree has valid reloc tree.
313 *
314 * Reloc tree after swap is considered dead, thus not considered as valid.
315 * This is enough for most callers, as they don't distinguish dead reloc root
316 * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
317 * special case.
318 */
319static bool have_reloc_root(const struct btrfs_root *root)
320{
321 if (reloc_root_is_dead(root))
322 return false;
323 if (!root->reloc_root)
324 return false;
325 return true;
326}
327
328bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
329{
330 struct btrfs_root *reloc_root;
331
332 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
333 return false;
334
335 /* This root has been merged with its reloc tree, we can ignore it */
336 if (reloc_root_is_dead(root))
337 return true;
338
339 reloc_root = root->reloc_root;
340 if (!reloc_root)
341 return false;
342
343 if (btrfs_header_generation(reloc_root->commit_root) ==
344 root->fs_info->running_transaction->transid)
345 return false;
346 /*
347	 * If there is a reloc tree and it was created in a previous transaction,
348	 * backref lookup can find the reloc tree, so the backref node for the fs
349 * tree root is useless for relocation.
350 */
351 return true;
352}
353
354/*
355 * find reloc tree by address of tree root
356 */
357struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
358{
359 struct reloc_control *rc = fs_info->reloc_ctl;
360 struct rb_node *rb_node;
361 struct mapping_node *node;
362 struct btrfs_root *root = NULL;
363
364 ASSERT(rc);
365 spin_lock(&rc->reloc_root_tree.lock);
366 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
367 if (rb_node) {
368 node = rb_entry(rb_node, struct mapping_node, rb_node);
369 root = node->data;
370 }
371 spin_unlock(&rc->reloc_root_tree.lock);
372 return btrfs_grab_root(root);
373}
374
375/*
376 * For useless nodes, do two major clean ups:
377 *
378 * - Cleanup the children edges and nodes
379 * If child node is also orphan (no parent) during cleanup, then the child
380 * node will also be cleaned up.
381 *
382 * - Freeing up leaves (level 0), keeping nodes detached
383 *   Non-leaf nodes are kept in the cache, marked as "detached"
384 *
385 * Return false if @node is not in the @useless_nodes list.
386 * Return true if @node is in the @useless_nodes list.
387 */
388static bool handle_useless_nodes(struct reloc_control *rc,
389 struct btrfs_backref_node *node)
390{
391 struct btrfs_backref_cache *cache = &rc->backref_cache;
392 struct list_head *useless_node = &cache->useless_node;
393 bool ret = false;
394
395 while (!list_empty(useless_node)) {
396 struct btrfs_backref_node *cur;
397
398 cur = list_first_entry(useless_node, struct btrfs_backref_node,
399 list);
400 list_del_init(&cur->list);
401
402 /* Only tree root nodes can be added to @useless_node */
403 ASSERT(list_empty(&cur->upper));
404
405 if (cur == node)
406 ret = true;
407
408 /* The node is the lowest node */
409 if (cur->lowest) {
410 list_del_init(&cur->lower);
411 cur->lowest = 0;
412 }
413
414 /* Cleanup the lower edges */
415 while (!list_empty(&cur->lower)) {
416 struct btrfs_backref_edge *edge;
417 struct btrfs_backref_node *lower;
418
419 edge = list_entry(cur->lower.next,
420 struct btrfs_backref_edge, list[UPPER]);
421 list_del(&edge->list[UPPER]);
422 list_del(&edge->list[LOWER]);
423 lower = edge->node[LOWER];
424 btrfs_backref_free_edge(cache, edge);
425
426 /* Child node is also orphan, queue for cleanup */
427 if (list_empty(&lower->upper))
428 list_add(&lower->list, useless_node);
429 }
430 /* Mark this block processed for relocation */
431 mark_block_processed(rc, cur);
432
433 /*
434 * Backref nodes for tree leaves are deleted from the cache.
435 * Backref nodes for upper level tree blocks are left in the
436 * cache to avoid unnecessary backref lookup.
437 */
438 if (cur->level > 0) {
439 list_add(&cur->list, &cache->detached);
440 cur->detached = 1;
441 } else {
442 rb_erase(&cur->rb_node, &cache->rb_root);
443 btrfs_backref_free_node(cache, cur);
444 }
445 }
446 return ret;
447}
448
449/*
450 * Build backref tree for a given tree block. Root of the backref tree
451 * corresponds to the tree block, leaves of the backref tree correspond to roots of
452 * b-trees that reference the tree block.
453 *
454 * The basic idea of this function is to check backrefs of a given block to find
455 * upper level blocks that reference the block, and then check backrefs of
456 * these upper level blocks recursively. The recursion stops when tree root is
457 * reached or backrefs for the block are cached.
458 *
459 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
460 * all upper level blocks that directly/indirectly reference the block are also
461 * cached.
462 */
463static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
464 struct btrfs_trans_handle *trans,
465 struct reloc_control *rc, struct btrfs_key *node_key,
466 int level, u64 bytenr)
467{
468 struct btrfs_backref_iter *iter;
469 struct btrfs_backref_cache *cache = &rc->backref_cache;
470 /* For searching parent of TREE_BLOCK_REF */
471 struct btrfs_path *path;
472 struct btrfs_backref_node *cur;
473 struct btrfs_backref_node *node = NULL;
474 struct btrfs_backref_edge *edge;
475 int ret;
476 int err = 0;
477
478 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
479 if (!iter)
480 return ERR_PTR(-ENOMEM);
481 path = btrfs_alloc_path();
482 if (!path) {
483 err = -ENOMEM;
484 goto out;
485 }
486
487 node = btrfs_backref_alloc_node(cache, bytenr, level);
488 if (!node) {
489 err = -ENOMEM;
490 goto out;
491 }
492
493 node->lowest = 1;
494 cur = node;
495
496 /* Breadth-first search to build backref cache */
497 do {
498 ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
499 node_key, cur);
500 if (ret < 0) {
501 err = ret;
502 goto out;
503 }
504 edge = list_first_entry_or_null(&cache->pending_edge,
505 struct btrfs_backref_edge, list[UPPER]);
506 /*
507 * The pending list isn't empty, take the first block to
508 * process
509 */
510 if (edge) {
511 list_del_init(&edge->list[UPPER]);
512 cur = edge->node[UPPER];
513 }
514 } while (edge);
515
516 /* Finish the upper linkage of newly added edges/nodes */
517 ret = btrfs_backref_finish_upper_links(cache, node);
518 if (ret < 0) {
519 err = ret;
520 goto out;
521 }
522
523 if (handle_useless_nodes(rc, node))
524 node = NULL;
525out:
526 btrfs_free_path(iter->path);
527 kfree(iter);
528 btrfs_free_path(path);
529 if (err) {
530 btrfs_backref_error_cleanup(cache, node);
531 return ERR_PTR(err);
532 }
533 ASSERT(!node || !node->detached);
534 ASSERT(list_empty(&cache->useless_node) &&
535 list_empty(&cache->pending_edge));
536 return node;
537}
538
539/*
540 * helper to add backref node for the newly created snapshot.
541 * the backref node is created by cloning the backref node that
542 * corresponds to the root of the source tree
543 */
544static int clone_backref_node(struct btrfs_trans_handle *trans,
545 struct reloc_control *rc,
546 const struct btrfs_root *src,
547 struct btrfs_root *dest)
548{
549 struct btrfs_root *reloc_root = src->reloc_root;
550 struct btrfs_backref_cache *cache = &rc->backref_cache;
551 struct btrfs_backref_node *node = NULL;
552 struct btrfs_backref_node *new_node;
553 struct btrfs_backref_edge *edge;
554 struct btrfs_backref_edge *new_edge;
555 struct rb_node *rb_node;
556
557 if (cache->last_trans > 0)
558 update_backref_cache(trans, cache);
559
560 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
561 if (rb_node) {
562 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
563 if (node->detached)
564 node = NULL;
565 else
566 BUG_ON(node->new_bytenr != reloc_root->node->start);
567 }
568
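	/*
	 * Not found by the source tree's commit root bytenr; the node may
	 * still be keyed by the reloc root's old commit root bytenr.
	 */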
569 if (!node) {
570 rb_node = rb_simple_search(&cache->rb_root,
571 reloc_root->commit_root->start);
572 if (rb_node) {
573 node = rb_entry(rb_node, struct btrfs_backref_node,
574 rb_node);
575 BUG_ON(node->detached);
576 }
577 }
578
579 if (!node)
580 return 0;
581
582 new_node = btrfs_backref_alloc_node(cache, dest->node->start,
583 node->level);
584 if (!new_node)
585 return -ENOMEM;
586
587 new_node->lowest = node->lowest;
588 new_node->checked = 1;
589 new_node->root = btrfs_grab_root(dest);
590 ASSERT(new_node->root);
591
592 if (!node->lowest) {
593 list_for_each_entry(edge, &node->lower, list[UPPER]) {
594 new_edge = btrfs_backref_alloc_edge(cache);
595 if (!new_edge)
596 goto fail;
597
598 btrfs_backref_link_edge(new_edge, edge->node[LOWER],
599 new_node, LINK_UPPER);
600 }
601 } else {
602 list_add_tail(&new_node->lower, &cache->leaves);
603 }
604
605 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
606 &new_node->rb_node);
607 if (rb_node)
608 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
609
610 if (!new_node->lowest) {
611 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
612 list_add_tail(&new_edge->list[LOWER],
613 &new_edge->node[LOWER]->upper);
614 }
615 }
616 return 0;
617fail:
618 while (!list_empty(&new_node->lower)) {
619 new_edge = list_entry(new_node->lower.next,
620 struct btrfs_backref_edge, list[UPPER]);
621 list_del(&new_edge->list[UPPER]);
622 btrfs_backref_free_edge(cache, new_edge);
623 }
624 btrfs_backref_free_node(cache, new_node);
625 return -ENOMEM;
626}
627
628/*
629 * helper to add 'address of tree root -> reloc tree' mapping
630 */
631static int __add_reloc_root(struct btrfs_root *root)
632{
633 struct btrfs_fs_info *fs_info = root->fs_info;
634 struct rb_node *rb_node;
635 struct mapping_node *node;
636 struct reloc_control *rc = fs_info->reloc_ctl;
637
638 node = kmalloc(sizeof(*node), GFP_NOFS);
639 if (!node)
640 return -ENOMEM;
641
642 node->bytenr = root->commit_root->start;
643 node->data = root;
644
645 spin_lock(&rc->reloc_root_tree.lock);
646 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
647 node->bytenr, &node->rb_node);
648 spin_unlock(&rc->reloc_root_tree.lock);
649 if (rb_node) {
650 btrfs_err(fs_info,
651 "Duplicate root found for start=%llu while inserting into relocation tree",
652 node->bytenr);
653 return -EEXIST;
654 }
655
656 list_add_tail(&root->root_list, &rc->reloc_roots);
657 return 0;
658}
659
660/*
661 * helper to delete the 'address of tree root -> reloc tree'
662 * mapping
663 */
664static void __del_reloc_root(struct btrfs_root *root)
665{
666 struct btrfs_fs_info *fs_info = root->fs_info;
667 struct rb_node *rb_node;
668 struct mapping_node *node = NULL;
669 struct reloc_control *rc = fs_info->reloc_ctl;
670 bool put_ref = false;
671
672 if (rc && root->node) {
673 spin_lock(&rc->reloc_root_tree.lock);
674 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
675 root->commit_root->start);
676 if (rb_node) {
677 node = rb_entry(rb_node, struct mapping_node, rb_node);
678 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
679 RB_CLEAR_NODE(&node->rb_node);
680 }
681 spin_unlock(&rc->reloc_root_tree.lock);
682 ASSERT(!node || (struct btrfs_root *)node->data == root);
683 }
684
685 /*
686 * We only put the reloc root here if it's on the list. There's a lot
687 * of places where the pattern is to splice the rc->reloc_roots, process
688 * the reloc roots, and then add the reloc root back onto
689 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
690 * list we don't want the reference being dropped, because the guy
691 * messing with the list is in charge of the reference.
692 */
693 spin_lock(&fs_info->trans_lock);
694 if (!list_empty(&root->root_list)) {
695 put_ref = true;
696 list_del_init(&root->root_list);
697 }
698 spin_unlock(&fs_info->trans_lock);
699 if (put_ref)
700 btrfs_put_root(root);
701 kfree(node);
702}
703
704/*
705 * helper to update the 'address of tree root -> reloc tree'
706 * mapping
707 */
708static int __update_reloc_root(struct btrfs_root *root)
709{
710 struct btrfs_fs_info *fs_info = root->fs_info;
711 struct rb_node *rb_node;
712 struct mapping_node *node = NULL;
713 struct reloc_control *rc = fs_info->reloc_ctl;
714
715 spin_lock(&rc->reloc_root_tree.lock);
716 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
717 root->commit_root->start);
718 if (rb_node) {
719 node = rb_entry(rb_node, struct mapping_node, rb_node);
720 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
721 }
722 spin_unlock(&rc->reloc_root_tree.lock);
723
724 if (!node)
725 return 0;
726 BUG_ON((struct btrfs_root *)node->data != root);
727
728 spin_lock(&rc->reloc_root_tree.lock);
729 node->bytenr = root->node->start;
730 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
731 node->bytenr, &node->rb_node);
732 spin_unlock(&rc->reloc_root_tree.lock);
733 if (rb_node)
734 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
735 return 0;
736}
737
738static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
739 struct btrfs_root *root, u64 objectid)
740{
741 struct btrfs_fs_info *fs_info = root->fs_info;
742 struct btrfs_root *reloc_root;
743 struct extent_buffer *eb;
744 struct btrfs_root_item *root_item;
745 struct btrfs_key root_key;
746 int ret = 0;
747 bool must_abort = false;
748
749 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
750 if (!root_item)
751 return ERR_PTR(-ENOMEM);
752
753 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
754 root_key.type = BTRFS_ROOT_ITEM_KEY;
755 root_key.offset = objectid;
756
757 if (root->root_key.objectid == objectid) {
758 u64 commit_root_gen;
759
760 /* called by btrfs_init_reloc_root */
761 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
762 BTRFS_TREE_RELOC_OBJECTID);
763 if (ret)
764 goto fail;
765
766 /*
767 * Set the last_snapshot field to the generation of the commit
768 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
769 * correctly (returns true) when the relocation root is created
770 * either inside the critical section of a transaction commit
771 * (through transaction.c:qgroup_account_snapshot()) and when
772 * it's created before the transaction commit is started.
773 */
774 commit_root_gen = btrfs_header_generation(root->commit_root);
775 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
776 } else {
777 /*
778 * called by btrfs_reloc_post_snapshot_hook.
779		 * the source tree is a reloc tree; all tree blocks
780		 * modified after it was created have the RELOC flag
781		 * set in their headers, so it's OK not to update
782 * the 'last_snapshot'.
783 */
784 ret = btrfs_copy_root(trans, root, root->node, &eb,
785 BTRFS_TREE_RELOC_OBJECTID);
786 if (ret)
787 goto fail;
788 }
789
790 /*
791 * We have changed references at this point, we must abort the
792 * transaction if anything fails.
793 */
794 must_abort = true;
795
796 memcpy(root_item, &root->root_item, sizeof(*root_item));
797 btrfs_set_root_bytenr(root_item, eb->start);
798 btrfs_set_root_level(root_item, btrfs_header_level(eb));
799 btrfs_set_root_generation(root_item, trans->transid);
800
801 if (root->root_key.objectid == objectid) {
802 btrfs_set_root_refs(root_item, 0);
803 memset(&root_item->drop_progress, 0,
804 sizeof(struct btrfs_disk_key));
805 btrfs_set_root_drop_level(root_item, 0);
806 }
807
808 btrfs_tree_unlock(eb);
809 free_extent_buffer(eb);
810
811 ret = btrfs_insert_root(trans, fs_info->tree_root,
812 &root_key, root_item);
813 if (ret)
814 goto fail;
815
816 kfree(root_item);
817
818 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
819 if (IS_ERR(reloc_root)) {
820 ret = PTR_ERR(reloc_root);
821 goto abort;
822 }
823 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
824 reloc_root->last_trans = trans->transid;
825 return reloc_root;
826fail:
827 kfree(root_item);
828abort:
829 if (must_abort)
830 btrfs_abort_transaction(trans, ret);
831 return ERR_PTR(ret);
832}
833
834/*
835 * create reloc tree for a given fs tree. reloc tree is just a
836 * snapshot of the fs tree with special root objectid.
837 *
838 * The reloc_root comes out of here with two references, one for
839 * root->reloc_root, and another for being on the rc->reloc_roots list.
840 */
841int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
842 struct btrfs_root *root)
843{
844 struct btrfs_fs_info *fs_info = root->fs_info;
845 struct btrfs_root *reloc_root;
846 struct reloc_control *rc = fs_info->reloc_ctl;
847 struct btrfs_block_rsv *rsv;
848 int clear_rsv = 0;
849 int ret;
850
851 if (!rc)
852 return 0;
853
854 /*
855 * The subvolume has reloc tree but the swap is finished, no need to
856 * create/update the dead reloc tree
857 */
858 if (reloc_root_is_dead(root))
859 return 0;
860
861 /*
862 * This is subtle but important. We do not do
863 * record_root_in_transaction for reloc roots, instead we record their
864 * corresponding fs root, and then here we update the last trans for the
865 * reloc root. This means that we have to do this for the entire life
866 * of the reloc root, regardless of which stage of the relocation we are
867 * in.
868 */
869 if (root->reloc_root) {
870 reloc_root = root->reloc_root;
871 reloc_root->last_trans = trans->transid;
872 return 0;
873 }
874
875 /*
876 * We are merging reloc roots, we do not need new reloc trees. Also
877 * reloc trees never need their own reloc tree.
878 */
879 if (!rc->create_reloc_tree ||
880 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
881 return 0;
882
883 if (!trans->reloc_reserved) {
884 rsv = trans->block_rsv;
885 trans->block_rsv = rc->block_rsv;
886 clear_rsv = 1;
887 }
888 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
889 if (clear_rsv)
890 trans->block_rsv = rsv;
891 if (IS_ERR(reloc_root))
892 return PTR_ERR(reloc_root);
893
894 ret = __add_reloc_root(reloc_root);
895 ASSERT(ret != -EEXIST);
896 if (ret) {
897 /* Pairs with create_reloc_root */
898 btrfs_put_root(reloc_root);
899 return ret;
900 }
901 root->reloc_root = btrfs_grab_root(reloc_root);
902 return 0;
903}
904
905/*
906 * update root item of reloc tree
907 */
908int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
909 struct btrfs_root *root)
910{
911 struct btrfs_fs_info *fs_info = root->fs_info;
912 struct btrfs_root *reloc_root;
913 struct btrfs_root_item *root_item;
914 int ret;
915
916 if (!have_reloc_root(root))
917 return 0;
918
919 reloc_root = root->reloc_root;
920 root_item = &reloc_root->root_item;
921
922 /*
923 * We are probably ok here, but __del_reloc_root() will drop its ref of
924 * the root. We have the ref for root->reloc_root, but just in case
925 * hold it while we update the reloc root.
926 */
927 btrfs_grab_root(reloc_root);
928
929 /* root->reloc_root will stay until current relocation finished */
930 if (fs_info->reloc_ctl->merge_reloc_tree &&
931 btrfs_root_refs(root_item) == 0) {
932 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
933 /*
934 * Mark the tree as dead before we change reloc_root so
935 * have_reloc_root will not touch it from now on.
936 */
937 smp_wmb();
938 __del_reloc_root(reloc_root);
939 }
940
941 if (reloc_root->commit_root != reloc_root->node) {
942 __update_reloc_root(reloc_root);
943 btrfs_set_root_node(root_item, reloc_root->node);
944 free_extent_buffer(reloc_root->commit_root);
945 reloc_root->commit_root = btrfs_root_node(reloc_root);
946 }
947
948 ret = btrfs_update_root(trans, fs_info->tree_root,
949 &reloc_root->root_key, root_item);
950 btrfs_put_root(reloc_root);
951 return ret;
952}
953
954/*
955 * helper to find first cached inode with inode number >= objectid
956 * in a subvolume
957 */
958static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
959{
960 struct rb_node *node;
961 struct rb_node *prev;
962 struct btrfs_inode *entry;
963 struct inode *inode;
964
965 spin_lock(&root->inode_lock);
966again:
967 node = root->inode_tree.rb_node;
968 prev = NULL;
969 while (node) {
970 prev = node;
971 entry = rb_entry(node, struct btrfs_inode, rb_node);
972
973 if (objectid < btrfs_ino(entry))
974 node = node->rb_left;
975 else if (objectid > btrfs_ino(entry))
976 node = node->rb_right;
977 else
978 break;
979 }
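	/*
	 * No exact match: rewind to the closest cached inode whose number
	 * is >= objectid.
	 */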
980 if (!node) {
981 while (prev) {
982 entry = rb_entry(prev, struct btrfs_inode, rb_node);
983 if (objectid <= btrfs_ino(entry)) {
984 node = prev;
985 break;
986 }
987 prev = rb_next(prev);
988 }
989 }
990 while (node) {
991 entry = rb_entry(node, struct btrfs_inode, rb_node);
992 inode = igrab(&entry->vfs_inode);
993 if (inode) {
994 spin_unlock(&root->inode_lock);
995 return inode;
996 }
997
998 objectid = btrfs_ino(entry) + 1;
999 if (cond_resched_lock(&root->inode_lock))
1000 goto again;
1001
1002 node = rb_next(node);
1003 }
1004 spin_unlock(&root->inode_lock);
1005 return NULL;
1006}
1007
1008/*
1009 * get new location of data
1010 */
1011static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1012 u64 bytenr, u64 num_bytes)
1013{
1014 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1015 struct btrfs_path *path;
1016 struct btrfs_file_extent_item *fi;
1017 struct extent_buffer *leaf;
1018 int ret;
1019
1020 path = btrfs_alloc_path();
1021 if (!path)
1022 return -ENOMEM;
1023
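	/*
	 * Undo the mapping used by btrfs_reloc_clone_csums(): the file
	 * offset in the reloc inode is the old disk bytenr minus the block
	 * group start (cached in index_cnt).
	 */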
1024 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1025 ret = btrfs_lookup_file_extent(NULL, root, path,
1026 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1027 if (ret < 0)
1028 goto out;
1029 if (ret > 0) {
1030 ret = -ENOENT;
1031 goto out;
1032 }
1033
1034 leaf = path->nodes[0];
1035 fi = btrfs_item_ptr(leaf, path->slots[0],
1036 struct btrfs_file_extent_item);
1037
1038 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1039 btrfs_file_extent_compression(leaf, fi) ||
1040 btrfs_file_extent_encryption(leaf, fi) ||
1041 btrfs_file_extent_other_encoding(leaf, fi));
1042
1043 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1044 ret = -EINVAL;
1045 goto out;
1046 }
1047
1048 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1049 ret = 0;
1050out:
1051 btrfs_free_path(path);
1052 return ret;
1053}
1054
1055/*
1056 * update file extent items in the tree leaf to point to
1057 * the new locations.
1058 */
1059static noinline_for_stack
1060int replace_file_extents(struct btrfs_trans_handle *trans,
1061 struct reloc_control *rc,
1062 struct btrfs_root *root,
1063 struct extent_buffer *leaf)
1064{
1065 struct btrfs_fs_info *fs_info = root->fs_info;
1066 struct btrfs_key key;
1067 struct btrfs_file_extent_item *fi;
1068 struct inode *inode = NULL;
1069 u64 parent;
1070 u64 bytenr;
1071 u64 new_bytenr = 0;
1072 u64 num_bytes;
1073 u64 end;
1074 u32 nritems;
1075 u32 i;
1076 int ret = 0;
1077 int first = 1;
1078 int dirty = 0;
1079
1080 if (rc->stage != UPDATE_DATA_PTRS)
1081 return 0;
1082
1083 /* reloc trees always use full backref */
1084 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1085 parent = leaf->start;
1086 else
1087 parent = 0;
1088
1089 nritems = btrfs_header_nritems(leaf);
1090 for (i = 0; i < nritems; i++) {
1091 struct btrfs_ref ref = { 0 };
1092
1093 cond_resched();
1094 btrfs_item_key_to_cpu(leaf, &key, i);
1095 if (key.type != BTRFS_EXTENT_DATA_KEY)
1096 continue;
1097 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1098 if (btrfs_file_extent_type(leaf, fi) ==
1099 BTRFS_FILE_EXTENT_INLINE)
1100 continue;
1101 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1102 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1103 if (bytenr == 0)
1104 continue;
1105 if (!in_range(bytenr, rc->block_group->start,
1106 rc->block_group->length))
1107 continue;
1108
1109 /*
1110 * if we are modifying block in fs tree, wait for read_folio
1111 * to complete and drop the extent cache
1112 */
1113 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1114 if (first) {
1115 inode = find_next_inode(root, key.objectid);
1116 first = 0;
1117 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1118 btrfs_add_delayed_iput(BTRFS_I(inode));
1119 inode = find_next_inode(root, key.objectid);
1120 }
1121 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1122 struct extent_state *cached_state = NULL;
1123
1124 end = key.offset +
1125 btrfs_file_extent_num_bytes(leaf, fi);
1126 WARN_ON(!IS_ALIGNED(key.offset,
1127 fs_info->sectorsize));
1128 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1129 end--;
1130 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1131 key.offset, end,
1132 &cached_state);
1133 if (!ret)
1134 continue;
1135
1136 btrfs_drop_extent_map_range(BTRFS_I(inode),
1137 key.offset, end, true);
1138 unlock_extent(&BTRFS_I(inode)->io_tree,
1139 key.offset, end, &cached_state);
1140 }
1141 }
1142
1143 ret = get_new_location(rc->data_inode, &new_bytenr,
1144 bytenr, num_bytes);
1145 if (ret) {
1146 /*
1147 * Don't have to abort since we've not changed anything
1148 * in the file extent yet.
1149 */
1150 break;
1151 }
1152
1153 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1154 dirty = 1;
1155
1156 key.offset -= btrfs_file_extent_offset(leaf, fi);
1157 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1158 num_bytes, parent, root->root_key.objectid);
1159 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1160 key.objectid, key.offset,
1161 root->root_key.objectid, false);
1162 ret = btrfs_inc_extent_ref(trans, &ref);
1163 if (ret) {
1164 btrfs_abort_transaction(trans, ret);
1165 break;
1166 }
1167
1168 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1169 num_bytes, parent, root->root_key.objectid);
1170 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1171 key.objectid, key.offset,
1172 root->root_key.objectid, false);
1173 ret = btrfs_free_extent(trans, &ref);
1174 if (ret) {
1175 btrfs_abort_transaction(trans, ret);
1176 break;
1177 }
1178 }
1179 if (dirty)
1180 btrfs_mark_buffer_dirty(trans, leaf);
1181 if (inode)
1182 btrfs_add_delayed_iput(BTRFS_I(inode));
1183 return ret;
1184}
1185
1186static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
1187 int slot, const struct btrfs_path *path,
1188 int level)
1189{
1190 struct btrfs_disk_key key1;
1191 struct btrfs_disk_key key2;
1192 btrfs_node_key(eb, &key1, slot);
1193 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1194 return memcmp(&key1, &key2, sizeof(key1));
1195}
1196
1197/*
1198 * try to replace tree blocks in fs tree with the new blocks
1199 * in reloc tree. tree blocks that haven't been modified since the
1200 * reloc tree was created can be replaced.
1201 *
1202 * if a block was replaced, level of the block + 1 is returned.
1203 * if no block got replaced, 0 is returned. if there are other
1204 * errors, a negative error number is returned.
1205 */
1206static noinline_for_stack
1207int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1208 struct btrfs_root *dest, struct btrfs_root *src,
1209 struct btrfs_path *path, struct btrfs_key *next_key,
1210 int lowest_level, int max_level)
1211{
1212 struct btrfs_fs_info *fs_info = dest->fs_info;
1213 struct extent_buffer *eb;
1214 struct extent_buffer *parent;
1215 struct btrfs_ref ref = { 0 };
1216 struct btrfs_key key;
1217 u64 old_bytenr;
1218 u64 new_bytenr;
1219 u64 old_ptr_gen;
1220 u64 new_ptr_gen;
1221 u64 last_snapshot;
1222 u32 blocksize;
1223 int cow = 0;
1224 int level;
1225 int ret;
1226 int slot;
1227
1228 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1229 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1230
1231 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1232again:
1233 slot = path->slots[lowest_level];
1234 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1235
1236 eb = btrfs_lock_root_node(dest);
1237 level = btrfs_header_level(eb);
1238
1239 if (level < lowest_level) {
1240 btrfs_tree_unlock(eb);
1241 free_extent_buffer(eb);
1242 return 0;
1243 }
1244
1245 if (cow) {
1246 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1247 BTRFS_NESTING_COW);
1248 if (ret) {
1249 btrfs_tree_unlock(eb);
1250 free_extent_buffer(eb);
1251 return ret;
1252 }
1253 }
1254
1255 if (next_key) {
1256 next_key->objectid = (u64)-1;
1257 next_key->type = (u8)-1;
1258 next_key->offset = (u64)-1;
1259 }
1260
1261 parent = eb;
1262 while (1) {
1263 level = btrfs_header_level(parent);
1264 ASSERT(level >= lowest_level);
1265
1266 ret = btrfs_bin_search(parent, 0, &key, &slot);
1267 if (ret < 0)
1268 break;
1269 if (ret && slot > 0)
1270 slot--;
1271
1272 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1273 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1274
1275 old_bytenr = btrfs_node_blockptr(parent, slot);
1276 blocksize = fs_info->nodesize;
1277 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1278
1279 if (level <= max_level) {
1280 eb = path->nodes[level];
1281 new_bytenr = btrfs_node_blockptr(eb,
1282 path->slots[level]);
1283 new_ptr_gen = btrfs_node_ptr_generation(eb,
1284 path->slots[level]);
1285 } else {
1286 new_bytenr = 0;
1287 new_ptr_gen = 0;
1288 }
1289
1290 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1291 ret = level;
1292 break;
1293 }
1294
1295 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1296 memcmp_node_keys(parent, slot, path, level)) {
1297 if (level <= lowest_level) {
1298 ret = 0;
1299 break;
1300 }
1301
1302 eb = btrfs_read_node_slot(parent, slot);
1303 if (IS_ERR(eb)) {
1304 ret = PTR_ERR(eb);
1305 break;
1306 }
1307 btrfs_tree_lock(eb);
1308 if (cow) {
1309 ret = btrfs_cow_block(trans, dest, eb, parent,
1310 slot, &eb,
1311 BTRFS_NESTING_COW);
1312 if (ret) {
1313 btrfs_tree_unlock(eb);
1314 free_extent_buffer(eb);
1315 break;
1316 }
1317 }
1318
1319 btrfs_tree_unlock(parent);
1320 free_extent_buffer(parent);
1321
1322 parent = eb;
1323 continue;
1324 }
1325
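		/*
		 * A replaceable block was found while searching read-only;
		 * restart the walk with COW enabled so the destination path
		 * can be modified.
		 */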
1326 if (!cow) {
1327 btrfs_tree_unlock(parent);
1328 free_extent_buffer(parent);
1329 cow = 1;
1330 goto again;
1331 }
1332
1333 btrfs_node_key_to_cpu(path->nodes[level], &key,
1334 path->slots[level]);
1335 btrfs_release_path(path);
1336
1337 path->lowest_level = level;
1338 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1339 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1340 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1341 path->lowest_level = 0;
1342 if (ret) {
1343 if (ret > 0)
1344 ret = -ENOENT;
1345 break;
1346 }
1347
1348 /*
1349		 * Inform qgroup to trace both subtrees.
1350 *
1351 * We must trace both trees.
1352 * 1) Tree reloc subtree
1353 * If not traced, we will leak data numbers
1354 * 2) Fs subtree
1355 * If not traced, we will double count old data
1356 *
1357 * We don't scan the subtree right now, but only record
1358 * the swapped tree blocks.
1359 * The real subtree rescan is delayed until we have new
1360 * CoW on the subtree root node before transaction commit.
1361 */
1362 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1363 rc->block_group, parent, slot,
1364 path->nodes[level], path->slots[level],
1365 last_snapshot);
1366 if (ret < 0)
1367 break;
1368 /*
1369 * swap blocks in fs tree and reloc tree.
1370 */
1371 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1372 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1373 btrfs_mark_buffer_dirty(trans, parent);
1374
1375 btrfs_set_node_blockptr(path->nodes[level],
1376 path->slots[level], old_bytenr);
1377 btrfs_set_node_ptr_generation(path->nodes[level],
1378 path->slots[level], old_ptr_gen);
1379 btrfs_mark_buffer_dirty(trans, path->nodes[level]);
1380
1381 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1382 blocksize, path->nodes[level]->start,
1383 src->root_key.objectid);
1384 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1385 0, true);
1386 ret = btrfs_inc_extent_ref(trans, &ref);
1387 if (ret) {
1388 btrfs_abort_transaction(trans, ret);
1389 break;
1390 }
1391 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1392 blocksize, 0, dest->root_key.objectid);
1393 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
1394 true);
1395 ret = btrfs_inc_extent_ref(trans, &ref);
1396 if (ret) {
1397 btrfs_abort_transaction(trans, ret);
1398 break;
1399 }
1400
1401 /* We don't know the real owning_root, use 0. */
1402 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1403 blocksize, path->nodes[level]->start, 0);
1404 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1405 0, true);
1406 ret = btrfs_free_extent(trans, &ref);
1407 if (ret) {
1408 btrfs_abort_transaction(trans, ret);
1409 break;
1410 }
1411
1412 /* We don't know the real owning_root, use 0. */
1413 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1414 blocksize, 0, 0);
1415 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
1416 0, true);
1417 ret = btrfs_free_extent(trans, &ref);
1418 if (ret) {
1419 btrfs_abort_transaction(trans, ret);
1420 break;
1421 }
1422
1423 btrfs_unlock_up_safe(path, 0);
1424
1425 ret = level;
1426 break;
1427 }
1428 btrfs_tree_unlock(parent);
1429 free_extent_buffer(parent);
1430 return ret;
1431}
1432
1433/*
1434 * helper to find next relocated block in reloc tree
1435 */
1436static noinline_for_stack
1437int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1438 int *level)
1439{
1440 struct extent_buffer *eb;
1441 int i;
1442 u64 last_snapshot;
1443 u32 nritems;
1444
1445 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1446
1447 for (i = 0; i < *level; i++) {
1448 free_extent_buffer(path->nodes[i]);
1449 path->nodes[i] = NULL;
1450 }
1451
1452 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1453 eb = path->nodes[i];
1454 nritems = btrfs_header_nritems(eb);
1455 while (path->slots[i] + 1 < nritems) {
1456 path->slots[i]++;
1457 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1458 last_snapshot)
1459 continue;
1460
1461 *level = i;
1462 return 0;
1463 }
1464 free_extent_buffer(path->nodes[i]);
1465 path->nodes[i] = NULL;
1466 }
1467 return 1;
1468}
1469
1470/*
1471 * walk down reloc tree to find relocated block of lowest level
1472 */
1473static noinline_for_stack
1474int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1475 int *level)
1476{
1477 struct extent_buffer *eb = NULL;
1478 int i;
1479 u64 ptr_gen = 0;
1480 u64 last_snapshot;
1481 u32 nritems;
1482
1483 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1484
1485 for (i = *level; i > 0; i--) {
1486 eb = path->nodes[i];
1487 nritems = btrfs_header_nritems(eb);
1488 while (path->slots[i] < nritems) {
1489 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1490 if (ptr_gen > last_snapshot)
1491 break;
1492 path->slots[i]++;
1493 }
1494 if (path->slots[i] >= nritems) {
1495 if (i == *level)
1496 break;
1497 *level = i + 1;
1498 return 0;
1499 }
1500 if (i == 1) {
1501 *level = i;
1502 return 0;
1503 }
1504
1505 eb = btrfs_read_node_slot(eb, path->slots[i]);
1506 if (IS_ERR(eb))
1507 return PTR_ERR(eb);
1508 BUG_ON(btrfs_header_level(eb) != i - 1);
1509 path->nodes[i - 1] = eb;
1510 path->slots[i - 1] = 0;
1511 }
1512 return 1;
1513}
1514
/*
 * Invalidate the extent cache for file extents whose keys are in the range
 * [min_key, max_key).
 */
1519static int invalidate_extent_cache(struct btrfs_root *root,
1520 const struct btrfs_key *min_key,
1521 const struct btrfs_key *max_key)
1522{
1523 struct btrfs_fs_info *fs_info = root->fs_info;
1524 struct inode *inode = NULL;
1525 u64 objectid;
1526 u64 start, end;
1527 u64 ino;
1528
1529 objectid = min_key->objectid;
1530 while (1) {
1531 struct extent_state *cached_state = NULL;
1532
1533 cond_resched();
1534 iput(inode);
1535
1536 if (objectid > max_key->objectid)
1537 break;
1538
1539 inode = find_next_inode(root, objectid);
1540 if (!inode)
1541 break;
1542 ino = btrfs_ino(BTRFS_I(inode));
1543
1544 if (ino > max_key->objectid) {
1545 iput(inode);
1546 break;
1547 }
1548
1549 objectid = ino + 1;
1550 if (!S_ISREG(inode->i_mode))
1551 continue;
1552
1553 if (unlikely(min_key->objectid == ino)) {
1554 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1555 continue;
1556 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1557 start = 0;
1558 else {
1559 start = min_key->offset;
1560 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1561 }
1562 } else {
1563 start = 0;
1564 }
1565
1566 if (unlikely(max_key->objectid == ino)) {
1567 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1568 continue;
1569 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1570 end = (u64)-1;
1571 } else {
1572 if (max_key->offset == 0)
1573 continue;
1574 end = max_key->offset;
1575 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1576 end--;
1577 }
1578 } else {
1579 end = (u64)-1;
1580 }
1581
1582 /* the lock_extent waits for read_folio to complete */
1583 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1584 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
1585 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1586 }
1587 return 0;
1588}
1589
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
1594 while (level < BTRFS_MAX_LEVEL) {
1595 if (!path->nodes[level])
1596 break;
1597 if (path->slots[level] + 1 <
1598 btrfs_header_nritems(path->nodes[level])) {
1599 btrfs_node_key_to_cpu(path->nodes[level], key,
1600 path->slots[level] + 1);
1601 return 0;
1602 }
1603 level++;
1604 }
1605 return 1;
1606}
1607
1608/*
1609 * Insert current subvolume into reloc_control::dirty_subvol_roots
1610 */
1611static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1612 struct reloc_control *rc,
1613 struct btrfs_root *root)
1614{
1615 struct btrfs_root *reloc_root = root->reloc_root;
1616 struct btrfs_root_item *reloc_root_item;
1617 int ret;
1618
1619 /* @root must be a subvolume tree root with a valid reloc tree */
1620 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1621 ASSERT(reloc_root);
1622
1623 reloc_root_item = &reloc_root->root_item;
1624 memset(&reloc_root_item->drop_progress, 0,
1625 sizeof(reloc_root_item->drop_progress));
1626 btrfs_set_root_drop_level(reloc_root_item, 0);
1627 btrfs_set_root_refs(reloc_root_item, 0);
1628 ret = btrfs_update_reloc_root(trans, root);
1629 if (ret)
1630 return ret;
1631
1632 if (list_empty(&root->reloc_dirty_list)) {
1633 btrfs_grab_root(root);
1634 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1635 }
1636
1637 return 0;
1638}
1639
1640static int clean_dirty_subvols(struct reloc_control *rc)
1641{
1642 struct btrfs_root *root;
1643 struct btrfs_root *next;
1644 int ret = 0;
1645 int ret2;
1646
1647 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1648 reloc_dirty_list) {
1649 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1650 /* Merged subvolume, cleanup its reloc root */
1651 struct btrfs_root *reloc_root = root->reloc_root;
1652
1653 list_del_init(&root->reloc_dirty_list);
1654 root->reloc_root = NULL;
1655 /*
1656 * Need barrier to ensure clear_bit() only happens after
1657 * root->reloc_root = NULL. Pairs with have_reloc_root.
1658 */
1659 smp_wmb();
1660 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1661 if (reloc_root) {
			/*
			 * btrfs_drop_snapshot() drops the ref we hold on
			 * ->reloc_root.  If it fails however we must drop
			 * the ref ourselves.
			 */
1667 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1668 if (ret2 < 0) {
1669 btrfs_put_root(reloc_root);
1670 if (!ret)
1671 ret = ret2;
1672 }
1673 }
1674 btrfs_put_root(root);
1675 } else {
1676 /* Orphan reloc tree, just clean it up */
1677 ret2 = btrfs_drop_snapshot(root, 0, 1);
1678 if (ret2 < 0) {
1679 btrfs_put_root(root);
1680 if (!ret)
1681 ret = ret2;
1682 }
1683 }
1684 }
1685 return ret;
1686}
1687
/*
 * Merge the relocated tree blocks in the reloc tree with the corresponding
 * fs tree.
 */
1691 */
1692static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1693 struct btrfs_root *root)
1694{
1695 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1696 struct btrfs_key key;
1697 struct btrfs_key next_key;
1698 struct btrfs_trans_handle *trans = NULL;
1699 struct btrfs_root *reloc_root;
1700 struct btrfs_root_item *root_item;
1701 struct btrfs_path *path;
1702 struct extent_buffer *leaf;
1703 int reserve_level;
1704 int level;
1705 int max_level;
1706 int replaced = 0;
1707 int ret = 0;
1708 u32 min_reserved;
1709
1710 path = btrfs_alloc_path();
1711 if (!path)
1712 return -ENOMEM;
1713 path->reada = READA_FORWARD;
1714
1715 reloc_root = root->reloc_root;
1716 root_item = &reloc_root->root_item;
1717
1718 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1719 level = btrfs_root_level(root_item);
1720 atomic_inc(&reloc_root->node->refs);
1721 path->nodes[level] = reloc_root->node;
1722 path->slots[level] = 0;
1723 } else {
1724 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1725
1726 level = btrfs_root_drop_level(root_item);
1727 BUG_ON(level == 0);
1728 path->lowest_level = level;
1729 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1730 path->lowest_level = 0;
1731 if (ret < 0) {
1732 btrfs_free_path(path);
1733 return ret;
1734 }
1735
1736 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1737 path->slots[level]);
1738 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1739
1740 btrfs_unlock_up_safe(path, 0);
1741 }
1742
	/*
	 * In merge_reloc_root(), we modify the upper level pointers to swap
	 * the tree blocks between the reloc tree and the subvolume tree.
	 * Thus for tree block COW, we COW at most from level 1 to the root
	 * level for each tree.
	 *
	 * So the needed metadata size is at most root_level * nodesize,
	 * doubled because we have two trees to COW.
	 */
1751 reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1752 min_reserved = fs_info->nodesize * reserve_level * 2;
1753 memset(&next_key, 0, sizeof(next_key));
1754
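	/*
	 * Merge loop: each pass refills the block reservation, walks down the
	 * reloc tree to the lowest block changed since the last snapshot,
	 * swaps it into the fs tree with replace_path(), and records how far
	 * we got in drop_progress so an interrupted merge can resume.
	 */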
1755 while (1) {
1756 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1757 min_reserved,
1758 BTRFS_RESERVE_FLUSH_LIMIT);
1759 if (ret)
1760 goto out;
1761 trans = btrfs_start_transaction(root, 0);
1762 if (IS_ERR(trans)) {
1763 ret = PTR_ERR(trans);
1764 trans = NULL;
1765 goto out;
1766 }
1767
1768 /*
1769 * At this point we no longer have a reloc_control, so we can't
1770 * depend on btrfs_init_reloc_root to update our last_trans.
1771 *
1772 * But that's ok, we started the trans handle on our
1773 * corresponding fs_root, which means it's been added to the
1774 * dirty list. At commit time we'll still call
1775 * btrfs_update_reloc_root() and update our root item
1776 * appropriately.
1777 */
1778 reloc_root->last_trans = trans->transid;
1779 trans->block_rsv = rc->block_rsv;
1780
1781 replaced = 0;
1782 max_level = level;
1783
1784 ret = walk_down_reloc_tree(reloc_root, path, &level);
1785 if (ret < 0)
1786 goto out;
1787 if (ret > 0)
1788 break;
1789
1790 if (!find_next_key(path, level, &key) &&
1791 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1792 ret = 0;
1793 } else {
1794 ret = replace_path(trans, rc, root, reloc_root, path,
1795 &next_key, level, max_level);
1796 }
1797 if (ret < 0)
1798 goto out;
1799 if (ret > 0) {
1800 level = ret;
1801 btrfs_node_key_to_cpu(path->nodes[level], &key,
1802 path->slots[level]);
1803 replaced = 1;
1804 }
1805
1806 ret = walk_up_reloc_tree(reloc_root, path, &level);
1807 if (ret > 0)
1808 break;
1809
1810 BUG_ON(level == 0);
1811 /*
1812 * save the merging progress in the drop_progress.
1813 * this is OK since root refs == 1 in this case.
1814 */
1815 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1816 path->slots[level]);
1817 btrfs_set_root_drop_level(root_item, level);
1818
1819 btrfs_end_transaction_throttle(trans);
1820 trans = NULL;
1821
1822 btrfs_btree_balance_dirty(fs_info);
1823
1824 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1825 invalidate_extent_cache(root, &key, &next_key);
1826 }
1827
	/*
	 * Handle the case where only one block in the fs tree needs to be
	 * relocated and that block is the tree root.
	 */
1832 leaf = btrfs_lock_root_node(root);
1833 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1834 BTRFS_NESTING_COW);
1835 btrfs_tree_unlock(leaf);
1836 free_extent_buffer(leaf);
1837out:
1838 btrfs_free_path(path);
1839
1840 if (ret == 0) {
1841 ret = insert_dirty_subvol(trans, rc, root);
1842 if (ret)
1843 btrfs_abort_transaction(trans, ret);
1844 }
1845
1846 if (trans)
1847 btrfs_end_transaction_throttle(trans);
1848
1849 btrfs_btree_balance_dirty(fs_info);
1850
1851 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1852 invalidate_extent_cache(root, &key, &next_key);
1853
1854 return ret;
1855}
1856
1857static noinline_for_stack
1858int prepare_to_merge(struct reloc_control *rc, int err)
1859{
1860 struct btrfs_root *root = rc->extent_root;
1861 struct btrfs_fs_info *fs_info = root->fs_info;
1862 struct btrfs_root *reloc_root;
1863 struct btrfs_trans_handle *trans;
1864 LIST_HEAD(reloc_roots);
1865 u64 num_bytes = 0;
1866 int ret;
1867
1868 mutex_lock(&fs_info->reloc_mutex);
1869 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1870 rc->merging_rsv_size += rc->nodes_relocated * 2;
1871 mutex_unlock(&fs_info->reloc_mutex);
1872
1873again:
1874 if (!err) {
1875 num_bytes = rc->merging_rsv_size;
1876 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1877 BTRFS_RESERVE_FLUSH_ALL);
1878 if (ret)
1879 err = ret;
1880 }
1881
1882 trans = btrfs_join_transaction(rc->extent_root);
1883 if (IS_ERR(trans)) {
1884 if (!err)
1885 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1886 num_bytes, NULL);
1887 return PTR_ERR(trans);
1888 }
1889
1890 if (!err) {
1891 if (num_bytes != rc->merging_rsv_size) {
1892 btrfs_end_transaction(trans);
1893 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1894 num_bytes, NULL);
1895 goto again;
1896 }
1897 }
1898
1899 rc->merge_reloc_tree = true;
1900
1901 while (!list_empty(&rc->reloc_roots)) {
1902 reloc_root = list_entry(rc->reloc_roots.next,
1903 struct btrfs_root, root_list);
1904 list_del_init(&reloc_root->root_list);
1905
1906 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1907 false);
1908 if (IS_ERR(root)) {
1909 /*
1910 * Even if we have an error we need this reloc root
1911 * back on our list so we can clean up properly.
1912 */
1913 list_add(&reloc_root->root_list, &reloc_roots);
1914 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1915 if (!err)
1916 err = PTR_ERR(root);
1917 break;
1918 }
1919
1920 if (unlikely(root->reloc_root != reloc_root)) {
1921 if (root->reloc_root) {
1922 btrfs_err(fs_info,
1923"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
1924 root->root_key.objectid,
1925 root->reloc_root->root_key.objectid,
1926 root->reloc_root->root_key.type,
1927 root->reloc_root->root_key.offset,
1928 btrfs_root_generation(
1929 &root->reloc_root->root_item),
1930 reloc_root->root_key.objectid,
1931 reloc_root->root_key.type,
1932 reloc_root->root_key.offset,
1933 btrfs_root_generation(
1934 &reloc_root->root_item));
1935 } else {
1936 btrfs_err(fs_info,
1937"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
1938 root->root_key.objectid,
1939 reloc_root->root_key.objectid,
1940 reloc_root->root_key.type,
1941 reloc_root->root_key.offset,
1942 btrfs_root_generation(
1943 &reloc_root->root_item));
1944 }
1945 list_add(&reloc_root->root_list, &reloc_roots);
1946 btrfs_put_root(root);
1947 btrfs_abort_transaction(trans, -EUCLEAN);
1948 if (!err)
1949 err = -EUCLEAN;
1950 break;
1951 }
1952
		/*
		 * Set the reference count to 1, so btrfs_recover_relocation()
		 * knows it should resume merging.
		 */
1957 if (!err)
1958 btrfs_set_root_refs(&reloc_root->root_item, 1);
1959 ret = btrfs_update_reloc_root(trans, root);
1960
1961 /*
1962 * Even if we have an error we need this reloc root back on our
1963 * list so we can clean up properly.
1964 */
1965 list_add(&reloc_root->root_list, &reloc_roots);
1966 btrfs_put_root(root);
1967
1968 if (ret) {
1969 btrfs_abort_transaction(trans, ret);
1970 if (!err)
1971 err = ret;
1972 break;
1973 }
1974 }
1975
1976 list_splice(&reloc_roots, &rc->reloc_roots);
1977
1978 if (!err)
1979 err = btrfs_commit_transaction(trans);
1980 else
1981 btrfs_end_transaction(trans);
1982 return err;
1983}
1984
1985static noinline_for_stack
1986void free_reloc_roots(struct list_head *list)
1987{
1988 struct btrfs_root *reloc_root, *tmp;
1989
1990 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1991 __del_reloc_root(reloc_root);
1992}
1993
1994static noinline_for_stack
1995void merge_reloc_roots(struct reloc_control *rc)
1996{
1997 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1998 struct btrfs_root *root;
1999 struct btrfs_root *reloc_root;
2000 LIST_HEAD(reloc_roots);
2001 int found = 0;
2002 int ret = 0;
2003again:
2004 root = rc->extent_root;
2005
	/*
	 * This serializes us with btrfs_record_root_in_transaction(); we have
	 * to make sure nobody is in the middle of adding their roots to the
	 * list while we are doing this splice.
	 */
2012 mutex_lock(&fs_info->reloc_mutex);
2013 list_splice_init(&rc->reloc_roots, &reloc_roots);
2014 mutex_unlock(&fs_info->reloc_mutex);
2015
2016 while (!list_empty(&reloc_roots)) {
2017 found = 1;
2018 reloc_root = list_entry(reloc_roots.next,
2019 struct btrfs_root, root_list);
2020
2021 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
2022 false);
2023 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2024 if (WARN_ON(IS_ERR(root))) {
2025 /*
2026 * For recovery we read the fs roots on mount,
2027 * and if we didn't find the root then we marked
2028 * the reloc root as a garbage root. For normal
2029 * relocation obviously the root should exist in
2030 * memory. However there's no reason we can't
2031 * handle the error properly here just in case.
2032 */
2033 ret = PTR_ERR(root);
2034 goto out;
2035 }
2036 if (WARN_ON(root->reloc_root != reloc_root)) {
2037 /*
2038 * This can happen if on-disk metadata has some
2039 * corruption, e.g. bad reloc tree key offset.
2040 */
2041 ret = -EINVAL;
2042 goto out;
2043 }
2044 ret = merge_reloc_root(rc, root);
2045 btrfs_put_root(root);
2046 if (ret) {
2047 if (list_empty(&reloc_root->root_list))
2048 list_add_tail(&reloc_root->root_list,
2049 &reloc_roots);
2050 goto out;
2051 }
2052 } else {
2053 if (!IS_ERR(root)) {
2054 if (root->reloc_root == reloc_root) {
2055 root->reloc_root = NULL;
2056 btrfs_put_root(reloc_root);
2057 }
2058 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2059 &root->state);
2060 btrfs_put_root(root);
2061 }
2062
2063 list_del_init(&reloc_root->root_list);
2064 /* Don't forget to queue this reloc root for cleanup */
2065 list_add_tail(&reloc_root->reloc_dirty_list,
2066 &rc->dirty_subvol_roots);
2067 }
2068 }
2069
2070 if (found) {
2071 found = 0;
2072 goto again;
2073 }
2074out:
2075 if (ret) {
2076 btrfs_handle_fs_error(fs_info, ret, NULL);
2077 free_reloc_roots(&reloc_roots);
2078
2079 /* new reloc root may be added */
2080 mutex_lock(&fs_info->reloc_mutex);
2081 list_splice_init(&rc->reloc_roots, &reloc_roots);
2082 mutex_unlock(&fs_info->reloc_mutex);
2083 free_reloc_roots(&reloc_roots);
2084 }
2085
2086 /*
2087 * We used to have
2088 *
2089 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2090 *
2091 * here, but it's wrong. If we fail to start the transaction in
2092 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2093 * have actually been removed from the reloc_root_tree rb tree. This is
2094 * fine because we're bailing here, and we hold a reference on the root
2095 * for the list that holds it, so these roots will be cleaned up when we
2096 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
2097 * will be cleaned up on unmount.
2098 *
2099 * The remaining nodes will be cleaned up by free_reloc_control.
2100 */
2101}
2102
2103static void free_block_list(struct rb_root *blocks)
2104{
2105 struct tree_block *block;
2106 struct rb_node *rb_node;
2107 while ((rb_node = rb_first(blocks))) {
2108 block = rb_entry(rb_node, struct tree_block, rb_node);
2109 rb_erase(rb_node, blocks);
2110 kfree(block);
2111 }
2112}
2113
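/*
 * Ensure the subvolume root that owns @reloc_root has been recorded in the
 * current transaction; recording the subvolume root also brings the reloc
 * root's last_trans up to date.
 */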
2114static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2115 struct btrfs_root *reloc_root)
2116{
2117 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2118 struct btrfs_root *root;
2119 int ret;
2120
2121 if (reloc_root->last_trans == trans->transid)
2122 return 0;
2123
2124 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2125
2126 /*
2127 * This should succeed, since we can't have a reloc root without having
2128 * already looked up the actual root and created the reloc root for this
2129 * root.
2130 *
2131 * However if there's some sort of corruption where we have a ref to a
2132 * reloc root without a corresponding root this could return ENOENT.
2133 */
2134 if (IS_ERR(root)) {
2135 ASSERT(0);
2136 return PTR_ERR(root);
2137 }
2138 if (root->reloc_root != reloc_root) {
2139 ASSERT(0);
2140 btrfs_err(fs_info,
2141 "root %llu has two reloc roots associated with it",
2142 reloc_root->root_key.offset);
2143 btrfs_put_root(root);
2144 return -EUCLEAN;
2145 }
2146 ret = btrfs_record_root_in_trans(trans, root);
2147 btrfs_put_root(root);
2148
2149 return ret;
2150}
2151
2152static noinline_for_stack
2153struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2154 struct reloc_control *rc,
2155 struct btrfs_backref_node *node,
2156 struct btrfs_backref_edge *edges[])
2157{
2158 struct btrfs_backref_node *next;
2159 struct btrfs_root *root;
2160 int index = 0;
2161 int ret;
2162
2163 next = node;
2164 while (1) {
2165 cond_resched();
2166 next = walk_up_backref(next, edges, &index);
2167 root = next->root;
2168
2169 /*
2170 * If there is no root, then our references for this block are
2171 * incomplete, as we should be able to walk all the way up to a
2172 * block that is owned by a root.
2173 *
2174 * This path is only for SHAREABLE roots, so if we come upon a
2175 * non-SHAREABLE root then we have backrefs that resolve
2176 * improperly.
2177 *
2178 * Both of these cases indicate file system corruption, or a bug
2179 * in the backref walking code.
2180 */
2181 if (!root) {
2182 ASSERT(0);
2183 btrfs_err(trans->fs_info,
2184 "bytenr %llu doesn't have a backref path ending in a root",
2185 node->bytenr);
2186 return ERR_PTR(-EUCLEAN);
2187 }
2188 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2189 ASSERT(0);
2190 btrfs_err(trans->fs_info,
2191 "bytenr %llu has multiple refs with one ending in a non-shareable root",
2192 node->bytenr);
2193 return ERR_PTR(-EUCLEAN);
2194 }
2195
2196 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2197 ret = record_reloc_root_in_trans(trans, root);
2198 if (ret)
2199 return ERR_PTR(ret);
2200 break;
2201 }
2202
2203 ret = btrfs_record_root_in_trans(trans, root);
2204 if (ret)
2205 return ERR_PTR(ret);
2206 root = root->reloc_root;
2207
2208 /*
2209 * We could have raced with another thread which failed, so
2210 * root->reloc_root may not be set, return ENOENT in this case.
2211 */
2212 if (!root)
2213 return ERR_PTR(-ENOENT);
2214
2215 if (next->new_bytenr != root->node->start) {
2216 /*
2217 * We just created the reloc root, so we shouldn't have
2218 * ->new_bytenr set and this shouldn't be in the changed
2219 * list. If it is then we have multiple roots pointing
2220 * at the same bytenr which indicates corruption, or
2221 * we've made a mistake in the backref walking code.
2222 */
2223 ASSERT(next->new_bytenr == 0);
2224 ASSERT(list_empty(&next->list));
2225 if (next->new_bytenr || !list_empty(&next->list)) {
2226 btrfs_err(trans->fs_info,
2227 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2228 node->bytenr, next->bytenr);
2229 return ERR_PTR(-EUCLEAN);
2230 }
2231
2232 next->new_bytenr = root->node->start;
2233 btrfs_put_root(next->root);
2234 next->root = btrfs_grab_root(root);
2235 ASSERT(next->root);
2236 list_add_tail(&next->list,
2237 &rc->backref_cache.changed);
2238 mark_block_processed(rc, next);
2239 break;
2240 }
2241
2242 WARN_ON(1);
2243 root = NULL;
2244 next = walk_down_backref(edges, &index);
2245 if (!next || next->level <= node->level)
2246 break;
2247 }
2248 if (!root) {
2249 /*
2250 * This can happen if there's fs corruption or if there's a bug
2251 * in the backref lookup code.
2252 */
2253 ASSERT(0);
2254 return ERR_PTR(-ENOENT);
2255 }
2256
2257 next = node;
2258 /* setup backref node path for btrfs_reloc_cow_block */
2259 while (1) {
2260 rc->backref_cache.path[next->level] = next;
2261 if (--index < 0)
2262 break;
2263 next = edges[index]->node[UPPER];
2264 }
2265 return root;
2266}
2267
/*
 * Select a tree root for relocation.
 *
 * Return NULL if the block is only referenced through upper level blocks of
 * a shareable tree.  We should use do_relocation() in this case, since it
 * also updates the pointers in those upper level blocks.
 *
 * Return the owning root if the block belongs to a non-shareable tree, or if
 * it is the root node of a shareable tree.
 * Return -ENOENT if the block is the root of a reloc tree.
 * Return -EUCLEAN if the backrefs are inconsistent (e.g. a path ends without
 * a root).
 */
2277static noinline_for_stack
2278struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2279{
2280 struct btrfs_backref_node *next;
2281 struct btrfs_root *root;
2282 struct btrfs_root *fs_root = NULL;
2283 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2284 int index = 0;
2285
2286 next = node;
2287 while (1) {
2288 cond_resched();
2289 next = walk_up_backref(next, edges, &index);
2290 root = next->root;
2291
2292 /*
2293 * This can occur if we have incomplete extent refs leading all
2294 * the way up a particular path, in this case return -EUCLEAN.
2295 */
2296 if (!root)
2297 return ERR_PTR(-EUCLEAN);
2298
2299 /* No other choice for non-shareable tree */
2300 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2301 return root;
2302
2303 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2304 fs_root = root;
2305
2306 if (next != node)
2307 return NULL;
2308
2309 next = walk_down_backref(edges, &index);
2310 if (!next || next->level <= node->level)
2311 break;
2312 }
2313
2314 if (!fs_root)
2315 return ERR_PTR(-ENOENT);
2316 return fs_root;
2317}
2318
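/*
 * Compute an upper bound on the metadata bytes needed to COW @node and the
 * upper level blocks referencing it: one nodesize for every not yet
 * processed block along each backref path.
 */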
2319static noinline_for_stack
2320u64 calcu_metadata_size(struct reloc_control *rc,
2321 struct btrfs_backref_node *node, int reserve)
2322{
2323 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2324 struct btrfs_backref_node *next = node;
2325 struct btrfs_backref_edge *edge;
2326 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2327 u64 num_bytes = 0;
2328 int index = 0;
2329
2330 BUG_ON(reserve && node->processed);
2331
2332 while (next) {
2333 cond_resched();
2334 while (1) {
2335 if (next->processed && (reserve || next != node))
2336 break;
2337
2338 num_bytes += fs_info->nodesize;
2339
2340 if (list_empty(&next->upper))
2341 break;
2342
2343 edge = list_entry(next->upper.next,
2344 struct btrfs_backref_edge, list[LOWER]);
2345 edges[index++] = edge;
2346 next = edge->node[UPPER];
2347 }
2348 next = walk_down_backref(edges, &index);
2349 }
2350 return num_bytes;
2351}
2352
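/*
 * Reserve metadata space for relocating @node: twice the worst case COW
 * size, since blocks may have to be COWed in both the fs tree and the reloc
 * tree.
 */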
2353static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2354 struct reloc_control *rc,
2355 struct btrfs_backref_node *node)
2356{
2357 struct btrfs_root *root = rc->extent_root;
2358 struct btrfs_fs_info *fs_info = root->fs_info;
2359 u64 num_bytes;
2360 int ret;
2361 u64 tmp;
2362
2363 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2364
2365 trans->block_rsv = rc->block_rsv;
2366 rc->reserved_bytes += num_bytes;
2367
2368 /*
2369 * We are under a transaction here so we can only do limited flushing.
2370 * If we get an enospc just kick back -EAGAIN so we know to drop the
2371 * transaction and try to refill when we can flush all the things.
2372 */
2373 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2374 BTRFS_RESERVE_FLUSH_LIMIT);
2375 if (ret) {
2376 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2377 while (tmp <= rc->reserved_bytes)
2378 tmp <<= 1;
		/*
		 * Only one thread can access block_rsv at this point, so we
		 * don't need to hold a lock to protect it.  We expand the
		 * reservation size here to allow enough space for relocation
		 * and we will return early in the enospc case.
		 */
2386 rc->block_rsv->size = tmp + fs_info->nodesize *
2387 RELOCATION_RESERVED_NODES;
2388 return -EAGAIN;
2389 }
2390
2391 return 0;
2392}
2393
/*
 * Relocate a tree block, then update the pointers in upper level blocks that
 * reference the block so they point to the new location.
 *
 * If called by link_to_upper(), the block has already been relocated; in
 * that case this function just updates the pointers.
 */
2401static int do_relocation(struct btrfs_trans_handle *trans,
2402 struct reloc_control *rc,
2403 struct btrfs_backref_node *node,
2404 struct btrfs_key *key,
2405 struct btrfs_path *path, int lowest)
2406{
2407 struct btrfs_backref_node *upper;
2408 struct btrfs_backref_edge *edge;
2409 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2410 struct btrfs_root *root;
2411 struct extent_buffer *eb;
2412 u32 blocksize;
2413 u64 bytenr;
2414 int slot;
2415 int ret = 0;
2416
2417 /*
2418 * If we are lowest then this is the first time we're processing this
2419 * block, and thus shouldn't have an eb associated with it yet.
2420 */
2421 ASSERT(!lowest || !node->eb);
2422
2423 path->lowest_level = node->level + 1;
2424 rc->backref_cache.path[node->level] = node;
2425 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2426 struct btrfs_ref ref = { 0 };
2427
2428 cond_resched();
2429
2430 upper = edge->node[UPPER];
2431 root = select_reloc_root(trans, rc, upper, edges);
2432 if (IS_ERR(root)) {
2433 ret = PTR_ERR(root);
2434 goto next;
2435 }
2436
2437 if (upper->eb && !upper->locked) {
2438 if (!lowest) {
2439 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2440 if (ret < 0)
2441 goto next;
2442 BUG_ON(ret);
2443 bytenr = btrfs_node_blockptr(upper->eb, slot);
2444 if (node->eb->start == bytenr)
2445 goto next;
2446 }
2447 btrfs_backref_drop_node_buffer(upper);
2448 }
2449
2450 if (!upper->eb) {
2451 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2452 if (ret) {
2453 if (ret > 0)
2454 ret = -ENOENT;
2455
2456 btrfs_release_path(path);
2457 break;
2458 }
2459
2460 if (!upper->eb) {
2461 upper->eb = path->nodes[upper->level];
2462 path->nodes[upper->level] = NULL;
2463 } else {
2464 BUG_ON(upper->eb != path->nodes[upper->level]);
2465 }
2466
2467 upper->locked = 1;
2468 path->locks[upper->level] = 0;
2469
2470 slot = path->slots[upper->level];
2471 btrfs_release_path(path);
2472 } else {
2473 ret = btrfs_bin_search(upper->eb, 0, key, &slot);
2474 if (ret < 0)
2475 goto next;
2476 BUG_ON(ret);
2477 }
2478
2479 bytenr = btrfs_node_blockptr(upper->eb, slot);
2480 if (lowest) {
2481 if (bytenr != node->bytenr) {
2482 btrfs_err(root->fs_info,
2483 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2484 bytenr, node->bytenr, slot,
2485 upper->eb->start);
2486 ret = -EIO;
2487 goto next;
2488 }
2489 } else {
2490 if (node->eb->start == bytenr)
2491 goto next;
2492 }
2493
2494 blocksize = root->fs_info->nodesize;
2495 eb = btrfs_read_node_slot(upper->eb, slot);
2496 if (IS_ERR(eb)) {
2497 ret = PTR_ERR(eb);
2498 goto next;
2499 }
2500 btrfs_tree_lock(eb);
2501
2502 if (!node->eb) {
2503 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2504 slot, &eb, BTRFS_NESTING_COW);
2505 btrfs_tree_unlock(eb);
2506 free_extent_buffer(eb);
2507 if (ret < 0)
2508 goto next;
2509 /*
2510 * We've just COWed this block, it should have updated
2511 * the correct backref node entry.
2512 */
2513 ASSERT(node->eb == eb);
2514 } else {
2515 btrfs_set_node_blockptr(upper->eb, slot,
2516 node->eb->start);
2517 btrfs_set_node_ptr_generation(upper->eb, slot,
2518 trans->transid);
2519 btrfs_mark_buffer_dirty(trans, upper->eb);
2520
2521 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2522 node->eb->start, blocksize,
2523 upper->eb->start,
2524 btrfs_header_owner(upper->eb));
2525 btrfs_init_tree_ref(&ref, node->level,
2526 btrfs_header_owner(upper->eb),
2527 root->root_key.objectid, false);
2528 ret = btrfs_inc_extent_ref(trans, &ref);
2529 if (!ret)
2530 ret = btrfs_drop_subtree(trans, root, eb,
2531 upper->eb);
2532 if (ret)
2533 btrfs_abort_transaction(trans, ret);
2534 }
2535next:
2536 if (!upper->pending)
2537 btrfs_backref_drop_node_buffer(upper);
2538 else
2539 btrfs_backref_unlock_node_buffer(upper);
2540 if (ret)
2541 break;
2542 }
2543
2544 if (!ret && node->pending) {
2545 btrfs_backref_drop_node_buffer(node);
2546 list_move_tail(&node->list, &rc->backref_cache.changed);
2547 node->pending = 0;
2548 }
2549
2550 path->lowest_level = 0;
2551
2552 /*
2553 * We should have allocated all of our space in the block rsv and thus
2554 * shouldn't ENOSPC.
2555 */
2556 ASSERT(ret != -ENOSPC);
2557 return ret;
2558}
2559
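/*
 * The block behind @node has already been relocated; just propagate the new
 * location into the upper level blocks that reference it.
 */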
2560static int link_to_upper(struct btrfs_trans_handle *trans,
2561 struct reloc_control *rc,
2562 struct btrfs_backref_node *node,
2563 struct btrfs_path *path)
2564{
2565 struct btrfs_key key;
2566
2567 btrfs_node_key_to_cpu(node->eb, &key, 0);
2568 return do_relocation(trans, rc, node, &key, path, 0);
2569}
2570
2571static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2572 struct reloc_control *rc,
2573 struct btrfs_path *path, int err)
2574{
2575 LIST_HEAD(list);
2576 struct btrfs_backref_cache *cache = &rc->backref_cache;
2577 struct btrfs_backref_node *node;
2578 int level;
2579 int ret;
2580
2581 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2582 while (!list_empty(&cache->pending[level])) {
2583 node = list_entry(cache->pending[level].next,
2584 struct btrfs_backref_node, list);
2585 list_move_tail(&node->list, &list);
2586 BUG_ON(!node->pending);
2587
2588 if (!err) {
2589 ret = link_to_upper(trans, rc, node, path);
2590 if (ret < 0)
2591 err = ret;
2592 }
2593 }
2594 list_splice_init(&list, &cache->pending[level]);
2595 }
2596 return err;
2597}
2598
/*
 * Mark a block and all blocks that directly or indirectly reference the
 * block as processed.
 */
2603static void update_processed_blocks(struct reloc_control *rc,
2604 struct btrfs_backref_node *node)
2605{
2606 struct btrfs_backref_node *next = node;
2607 struct btrfs_backref_edge *edge;
2608 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2609 int index = 0;
2610
2611 while (next) {
2612 cond_resched();
2613 while (1) {
2614 if (next->processed)
2615 break;
2616
2617 mark_block_processed(rc, next);
2618
2619 if (list_empty(&next->upper))
2620 break;
2621
2622 edge = list_entry(next->upper.next,
2623 struct btrfs_backref_edge, list[LOWER]);
2624 edges[index++] = edge;
2625 next = edge->node[UPPER];
2626 }
2627 next = walk_down_backref(edges, &index);
2628 }
2629}
2630
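/* Return 1 if the tree block at @bytenr has already been processed. */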
2631static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2632{
2633 u32 blocksize = rc->extent_root->fs_info->nodesize;
2634
2635 if (test_range_bit(&rc->processed_blocks, bytenr,
2636 bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
2637 return 1;
2638 return 0;
2639}
2640
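/*
 * Read the tree block at @block->bytenr and cache its first key in
 * @block->key, so later stages can search for the block in its tree.
 */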
2641static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2642 struct tree_block *block)
2643{
2644 struct btrfs_tree_parent_check check = {
2645 .level = block->level,
2646 .owner_root = block->owner,
2647 .transid = block->key.offset
2648 };
2649 struct extent_buffer *eb;
2650
2651 eb = read_tree_block(fs_info, block->bytenr, &check);
2652 if (IS_ERR(eb))
2653 return PTR_ERR(eb);
2654 if (!extent_buffer_uptodate(eb)) {
2655 free_extent_buffer(eb);
2656 return -EIO;
2657 }
2658 if (block->level == 0)
2659 btrfs_item_key_to_cpu(eb, &block->key, 0);
2660 else
2661 btrfs_node_key_to_cpu(eb, &block->key, 0);
2662 free_extent_buffer(eb);
2663 block->key_ready = true;
2664 return 0;
2665}
2666
2667/*
2668 * helper function to relocate a tree block
2669 */
2670static int relocate_tree_block(struct btrfs_trans_handle *trans,
2671 struct reloc_control *rc,
2672 struct btrfs_backref_node *node,
2673 struct btrfs_key *key,
2674 struct btrfs_path *path)
2675{
2676 struct btrfs_root *root;
2677 int ret = 0;
2678
2679 if (!node)
2680 return 0;
2681
2682 /*
2683 * If we fail here we want to drop our backref_node because we are going
2684 * to start over and regenerate the tree for it.
2685 */
2686 ret = reserve_metadata_space(trans, rc, node);
2687 if (ret)
2688 goto out;
2689
2690 BUG_ON(node->processed);
2691 root = select_one_root(node);
2692 if (IS_ERR(root)) {
2693 ret = PTR_ERR(root);
2694
2695 /* See explanation in select_one_root for the -EUCLEAN case. */
2696 ASSERT(ret == -ENOENT);
2697 if (ret == -ENOENT) {
2698 ret = 0;
2699 update_processed_blocks(rc, node);
2700 }
2701 goto out;
2702 }
2703
2704 if (root) {
2705 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2706 /*
2707 * This block was the root block of a root, and this is
2708 * the first time we're processing the block and thus it
2709 * should not have had the ->new_bytenr modified and
2710 * should have not been included on the changed list.
2711 *
2712 * However in the case of corruption we could have
2713 * multiple refs pointing to the same block improperly,
2714 * and thus we would trip over these checks. ASSERT()
2715 * for the developer case, because it could indicate a
2716 * bug in the backref code, however error out for a
2717 * normal user in the case of corruption.
2718 */
2719 ASSERT(node->new_bytenr == 0);
2720 ASSERT(list_empty(&node->list));
2721 if (node->new_bytenr || !list_empty(&node->list)) {
2722 btrfs_err(root->fs_info,
2723 "bytenr %llu has improper references to it",
2724 node->bytenr);
2725 ret = -EUCLEAN;
2726 goto out;
2727 }
2728 ret = btrfs_record_root_in_trans(trans, root);
2729 if (ret)
2730 goto out;
2731 /*
2732 * Another thread could have failed, need to check if we
2733 * have reloc_root actually set.
2734 */
2735 if (!root->reloc_root) {
2736 ret = -ENOENT;
2737 goto out;
2738 }
2739 root = root->reloc_root;
2740 node->new_bytenr = root->node->start;
2741 btrfs_put_root(node->root);
2742 node->root = btrfs_grab_root(root);
2743 ASSERT(node->root);
2744 list_add_tail(&node->list, &rc->backref_cache.changed);
2745 } else {
2746 path->lowest_level = node->level;
2747 if (root == root->fs_info->chunk_root)
2748 btrfs_reserve_chunk_metadata(trans, false);
2749 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2750 btrfs_release_path(path);
2751 if (root == root->fs_info->chunk_root)
2752 btrfs_trans_release_chunk_metadata(trans);
2753 if (ret > 0)
2754 ret = 0;
2755 }
2756 if (!ret)
2757 update_processed_blocks(rc, node);
2758 } else {
2759 ret = do_relocation(trans, rc, node, key, path, 1);
2760 }
2761out:
2762 if (ret || node->level == 0 || node->cowonly)
2763 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2764 return ret;
2765}
2766
2767/*
2768 * relocate a list of blocks
2769 */
2770static noinline_for_stack
2771int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2772 struct reloc_control *rc, struct rb_root *blocks)
2773{
2774 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2775 struct btrfs_backref_node *node;
2776 struct btrfs_path *path;
2777 struct tree_block *block;
2778 struct tree_block *next;
2779 int ret;
2780 int err = 0;
2781
2782 path = btrfs_alloc_path();
2783 if (!path) {
2784 err = -ENOMEM;
2785 goto out_free_blocks;
2786 }
2787
2788 /* Kick in readahead for tree blocks with missing keys */
2789 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2790 if (!block->key_ready)
2791 btrfs_readahead_tree_block(fs_info, block->bytenr,
2792 block->owner, 0,
2793 block->level);
2794 }
2795
2796 /* Get first keys */
2797 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2798 if (!block->key_ready) {
2799 err = get_tree_block_key(fs_info, block);
2800 if (err)
2801 goto out_free_path;
2802 }
2803 }
2804
2805 /* Do tree relocation */
2806 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2807 node = build_backref_tree(trans, rc, &block->key,
2808 block->level, block->bytenr);
2809 if (IS_ERR(node)) {
2810 err = PTR_ERR(node);
2811 goto out;
2812 }
2813
2814 ret = relocate_tree_block(trans, rc, node, &block->key,
2815 path);
2816 if (ret < 0) {
2817 err = ret;
2818 break;
2819 }
2820 }
2821out:
2822 err = finish_pending_nodes(trans, rc, path, err);
2823
2824out_free_path:
2825 btrfs_free_path(path);
2826out_free_blocks:
2827 free_block_list(blocks);
2828 return err;
2829}
2830
2831static noinline_for_stack int prealloc_file_extent_cluster(
2832 struct btrfs_inode *inode,
2833 const struct file_extent_cluster *cluster)
2834{
2835 u64 alloc_hint = 0;
2836 u64 start;
2837 u64 end;
2838 u64 offset = inode->index_cnt;
2839 u64 num_bytes;
2840 int nr;
2841 int ret = 0;
2842 u64 i_size = i_size_read(&inode->vfs_inode);
2843 u64 prealloc_start = cluster->start - offset;
2844 u64 prealloc_end = cluster->end - offset;
2845 u64 cur_offset = prealloc_start;
2846
	/*
	 * For the subpage case, the previous i_size may not be aligned to
	 * PAGE_SIZE.  This means the range [i_size, PAGE_END + 1) is filled
	 * with zeros by the btrfs_do_readpage() call for the previously
	 * relocated file cluster.
	 *
	 * If the current cluster starts in the above range, btrfs_do_readpage()
	 * will skip the read, and relocate_one_page() will later write back
	 * the padding zeros as new data, causing data corruption.
	 *
	 * Here we have to manually invalidate the range [i_size, PAGE_END + 1).
	 */
2858 if (!PAGE_ALIGNED(i_size)) {
2859 struct address_space *mapping = inode->vfs_inode.i_mapping;
2860 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2861 const u32 sectorsize = fs_info->sectorsize;
2862 struct page *page;
2863
2864 ASSERT(sectorsize < PAGE_SIZE);
2865 ASSERT(IS_ALIGNED(i_size, sectorsize));
2866
2867 /*
2868 * Subpage can't handle page with DIRTY but without UPTODATE
2869 * bit as it can lead to the following deadlock:
2870 *
2871 * btrfs_read_folio()
2872 * | Page already *locked*
2873 * |- btrfs_lock_and_flush_ordered_range()
2874 * |- btrfs_start_ordered_extent()
2875 * |- extent_write_cache_pages()
2876 * |- lock_page()
2877 * We try to lock the page we already hold.
2878 *
		 * Here we just write back the whole data reloc inode, which
		 * ensures there is no dirty range in the page, and we are
		 * safe to clear the uptodate bits.
2882 *
2883 * This shouldn't cause too much overhead, as we need to write
2884 * the data back anyway.
2885 */
2886 ret = filemap_write_and_wait(mapping);
2887 if (ret < 0)
2888 return ret;
2889
2890 clear_extent_bits(&inode->io_tree, i_size,
2891 round_up(i_size, PAGE_SIZE) - 1,
2892 EXTENT_UPTODATE);
2893 page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2894 /*
2895 * If page is freed we don't need to do anything then, as we
2896 * will re-read the whole page anyway.
2897 */
2898 if (page) {
2899 btrfs_subpage_clear_uptodate(fs_info, page_folio(page), i_size,
2900 round_up(i_size, PAGE_SIZE) - i_size);
2901 unlock_page(page);
2902 put_page(page);
2903 }
2904 }
2905
2906 BUG_ON(cluster->start != cluster->boundary[0]);
2907 ret = btrfs_alloc_data_chunk_ondemand(inode,
2908 prealloc_end + 1 - prealloc_start);
2909 if (ret)
2910 return ret;
2911
2912 btrfs_inode_lock(inode, 0);
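	/*
	 * Preallocate one extent per cluster boundary so the destination
	 * extents mirror the source extent layout.
	 */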
2913 for (nr = 0; nr < cluster->nr; nr++) {
2914 struct extent_state *cached_state = NULL;
2915
2916 start = cluster->boundary[nr] - offset;
2917 if (nr + 1 < cluster->nr)
2918 end = cluster->boundary[nr + 1] - 1 - offset;
2919 else
2920 end = cluster->end - offset;
2921
2922 lock_extent(&inode->io_tree, start, end, &cached_state);
2923 num_bytes = end + 1 - start;
2924 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2925 num_bytes, num_bytes,
2926 end + 1, &alloc_hint);
2927 cur_offset = end + 1;
2928 unlock_extent(&inode->io_tree, start, end, &cached_state);
2929 if (ret)
2930 break;
2931 }
2932 btrfs_inode_unlock(inode, 0);
2933
2934 if (cur_offset < prealloc_end)
2935 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2936 prealloc_end + 1 - cur_offset);
2937 return ret;
2938}
2939
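/*
 * Insert a pinned extent map covering the whole cluster, mapping the file
 * range directly to the old data extent, so reads during relocation return
 * the original data.
 */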
2940static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2941 u64 start, u64 end, u64 block_start)
2942{
2943 struct extent_map *em;
2944 struct extent_state *cached_state = NULL;
2945 int ret = 0;
2946
2947 em = alloc_extent_map();
2948 if (!em)
2949 return -ENOMEM;
2950
2951 em->start = start;
2952 em->len = end + 1 - start;
2953 em->block_len = em->len;
2954 em->block_start = block_start;
2955 em->flags |= EXTENT_FLAG_PINNED;
2956
2957 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2958 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2959 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2960 free_extent_map(em);
2961
2962 return ret;
2963}
2964
2965/*
2966 * Allow error injection to test balance/relocation cancellation
2967 */
2968noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
2969{
2970 return atomic_read(&fs_info->balance_cancel_req) ||
2971 atomic_read(&fs_info->reloc_cancel_req) ||
2972 fatal_signal_pending(current);
2973}
2974ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2975
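/*
 * Return the inclusive end offset of the extent @cluster_nr inside the
 * cluster: one byte before the next boundary, or the cluster end for the
 * last extent.
 */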
2976static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
2977 int cluster_nr)
2978{
2979 /* Last extent, use cluster end directly */
2980 if (cluster_nr >= cluster->nr - 1)
2981 return cluster->end;
2982
	/* Otherwise the extent ends one byte before the next boundary start */
2984 return cluster->boundary[cluster_nr + 1] - 1;
2985}
2986
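/*
 * Relocate all sectors of one page: read the old data into the page cache,
 * mark the covered ranges delalloc and dirty, and set EXTENT_BOUNDARY bits
 * so the new extents keep the same layout as the source.
 */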
2987static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2988 const struct file_extent_cluster *cluster,
2989 int *cluster_nr, unsigned long page_index)
2990{
2991 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2992 u64 offset = BTRFS_I(inode)->index_cnt;
2993 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2994 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2995 struct page *page;
2996 u64 page_start;
2997 u64 page_end;
2998 u64 cur;
2999 int ret;
3000
3001 ASSERT(page_index <= last_index);
3002 page = find_lock_page(inode->i_mapping, page_index);
3003 if (!page) {
3004 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
3005 page_index, last_index + 1 - page_index);
3006 page = find_or_create_page(inode->i_mapping, page_index, mask);
3007 if (!page)
3008 return -ENOMEM;
3009 }
3010
3011 if (PageReadahead(page))
3012 page_cache_async_readahead(inode->i_mapping, ra, NULL,
3013 page_folio(page), page_index,
3014 last_index + 1 - page_index);
3015
3016 if (!PageUptodate(page)) {
3017 btrfs_read_folio(NULL, page_folio(page));
3018 lock_page(page);
3019 if (!PageUptodate(page)) {
3020 ret = -EIO;
3021 goto release_page;
3022 }
3023 }
3024
	/*
	 * We could have lost page private when we dropped the lock to read
	 * the page above.  Make sure we set_page_extent_mapped() here so we
	 * have any of the subpage blocksize structures we need in place.
	 */
3030 ret = set_page_extent_mapped(page);
3031 if (ret < 0)
3032 goto release_page;
3033
3034 page_start = page_offset(page);
3035 page_end = page_start + PAGE_SIZE - 1;
3036
3037 /*
3038 * Start from the cluster, as for subpage case, the cluster can start
3039 * inside the page.
3040 */
3041 cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
3042 while (cur <= page_end) {
3043 struct extent_state *cached_state = NULL;
3044 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
3045 u64 extent_end = get_cluster_boundary_end(cluster,
3046 *cluster_nr) - offset;
3047 u64 clamped_start = max(page_start, extent_start);
3048 u64 clamped_end = min(page_end, extent_end);
3049 u32 clamped_len = clamped_end + 1 - clamped_start;
3050
3051 /* Reserve metadata for this range */
3052 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3053 clamped_len, clamped_len,
3054 false);
3055 if (ret)
3056 goto release_page;
3057
3058 /* Mark the range delalloc and dirty for later writeback */
3059 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3060 &cached_state);
3061 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3062 clamped_end, 0, &cached_state);
3063 if (ret) {
3064 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3065 clamped_start, clamped_end,
3066 EXTENT_LOCKED | EXTENT_BOUNDARY,
3067 &cached_state);
3068 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3069 clamped_len, true);
3070 btrfs_delalloc_release_extents(BTRFS_I(inode),
3071 clamped_len);
3072 goto release_page;
3073 }
3074 btrfs_folio_set_dirty(fs_info, page_folio(page),
3075 clamped_start, clamped_len);
3076
3077 /*
3078 * Set the boundary if it's inside the page.
3079 * Data relocation requires the destination extents to have the
3080 * same size as the source.
3081 * EXTENT_BOUNDARY bit prevents current extent from being merged
3082 * with previous extent.
3083 */
3084 if (in_range(cluster->boundary[*cluster_nr] - offset,
3085 page_start, PAGE_SIZE)) {
3086 u64 boundary_start = cluster->boundary[*cluster_nr] -
3087 offset;
3088 u64 boundary_end = boundary_start +
3089 fs_info->sectorsize - 1;
3090
3091 set_extent_bit(&BTRFS_I(inode)->io_tree,
3092 boundary_start, boundary_end,
3093 EXTENT_BOUNDARY, NULL);
3094 }
3095 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3096 &cached_state);
3097 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3098 cur += clamped_len;
3099
3100 /* Crossed extent end, go to next extent */
3101 if (cur >= extent_end) {
3102 (*cluster_nr)++;
3103 /* Just finished the last extent of the cluster, exit. */
3104 if (*cluster_nr >= cluster->nr)
3105 break;
3106 }
3107 }
3108 unlock_page(page);
3109 put_page(page);
3110
3111 balance_dirty_pages_ratelimited(inode->i_mapping);
3112 btrfs_throttle(fs_info);
3113 if (btrfs_should_cancel_balance(fs_info))
3114 ret = -ECANCELED;
3115 return ret;
3116
3117release_page:
3118 unlock_page(page);
3119 put_page(page);
3120 return ret;
3121}
3122
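/*
 * Relocate one cluster of adjacent data extents: preallocate the destination
 * space, pin an extent map to the old location, then dirty every page in the
 * range so writeback copies the data into the new extents.
 */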
3123static int relocate_file_extent_cluster(struct inode *inode,
3124 const struct file_extent_cluster *cluster)
3125{
3126 u64 offset = BTRFS_I(inode)->index_cnt;
3127 unsigned long index;
3128 unsigned long last_index;
3129 struct file_ra_state *ra;
3130 int cluster_nr = 0;
3131 int ret = 0;
3132
3133 if (!cluster->nr)
3134 return 0;
3135
3136 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3137 if (!ra)
3138 return -ENOMEM;
3139
3140 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3141 if (ret)
3142 goto out;
3143
3144 file_ra_state_init(ra, inode->i_mapping);
3145
3146 ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3147 cluster->end - offset, cluster->start);
3148 if (ret)
3149 goto out;
3150
3151 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3152 for (index = (cluster->start - offset) >> PAGE_SHIFT;
3153 index <= last_index && !ret; index++)
3154 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3155 if (ret == 0)
3156 WARN_ON(cluster_nr != cluster->nr);
3157out:
3158 kfree(ra);
3159 return ret;
3160}
3161
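/*
 * Gather data extents into a cluster, flushing the cluster with
 * relocate_file_extent_cluster() whenever it is full, not adjacent to the
 * new extent, or owned by a different source root.
 */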
3162static noinline_for_stack int relocate_data_extent(struct inode *inode,
3163 const struct btrfs_key *extent_key,
3164 struct file_extent_cluster *cluster)
3165{
3166 int ret;
3167 struct btrfs_root *root = BTRFS_I(inode)->root;
3168
3169 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3170 ret = relocate_file_extent_cluster(inode, cluster);
3171 if (ret)
3172 return ret;
3173 cluster->nr = 0;
3174 }
3175
3176 /*
3177 * Under simple quotas, we set root->relocation_src_root when we find
3178 * the extent. If adjacent extents have different owners, we can't merge
3179 * them while relocating. Handle this by storing the owning root that
3180 * started a cluster and if we see an extent from a different root break
3181 * cluster formation (just like the above case of non-adjacent extents).
3182 *
3183 * Without simple quotas, relocation_src_root is always 0, so we should
3184 * never see a mismatch, and it should have no effect on relocation
3185 * clusters.
3186 */
3187 if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
3188 u64 tmp = root->relocation_src_root;
3189
3190 /*
3191 * root->relocation_src_root is the state that actually affects
3192 * the preallocation we do here, so set it to the root owning
3193 * the cluster we need to relocate.
3194 */
3195 root->relocation_src_root = cluster->owning_root;
3196 ret = relocate_file_extent_cluster(inode, cluster);
3197 if (ret)
3198 return ret;
3199 cluster->nr = 0;
3200 /* And reset it back for the current extent's owning root. */
3201 root->relocation_src_root = tmp;
3202 }
3203
	if (!cluster->nr) {
		cluster->start = extent_key->objectid;
		cluster->owning_root = root->relocation_src_root;
	} else {
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	}
3210 cluster->end = extent_key->objectid + extent_key->offset - 1;
3211 cluster->boundary[cluster->nr] = extent_key->objectid;
3212 cluster->nr++;
3213
3214 if (cluster->nr >= MAX_EXTENTS) {
3215 ret = relocate_file_extent_cluster(inode, cluster);
3216 if (ret)
3217 return ret;
3218 cluster->nr = 0;
3219 }
3220 return 0;
3221}
3222
3223/*
3224 * helper to add a tree block to the list.
3225 * the major work is getting the generation and level of the block
3226 */
3227static int add_tree_block(struct reloc_control *rc,
3228 const struct btrfs_key *extent_key,
3229 struct btrfs_path *path,
3230 struct rb_root *blocks)
3231{
3232 struct extent_buffer *eb;
3233 struct btrfs_extent_item *ei;
3234 struct btrfs_tree_block_info *bi;
3235 struct tree_block *block;
3236 struct rb_node *rb_node;
3237 u32 item_size;
3238 int level = -1;
3239 u64 generation;
3240 u64 owner = 0;
3241
3242 eb = path->nodes[0];
3243 item_size = btrfs_item_size(eb, path->slots[0]);
3244
3245 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3246 item_size >= sizeof(*ei) + sizeof(*bi)) {
3247 unsigned long ptr = 0, end;
3248
3249 ei = btrfs_item_ptr(eb, path->slots[0],
3250 struct btrfs_extent_item);
3251 end = (unsigned long)ei + item_size;
3252 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3253 bi = (struct btrfs_tree_block_info *)(ei + 1);
3254 level = btrfs_tree_block_level(eb, bi);
3255 ptr = (unsigned long)(bi + 1);
3256 } else {
3257 level = (int)extent_key->offset;
3258 ptr = (unsigned long)(ei + 1);
3259 }
3260 generation = btrfs_extent_generation(eb, ei);
3261
3262 /*
3263 * We're reading random blocks without knowing their owner ahead
3264 * of time. This is ok most of the time, as all reloc roots and
3265 * fs roots have the same lock type. However normal trees do
3266 * not, and the only way to know ahead of time is to read the
3267 * inline ref offset. We know it's an fs root if
3268 *
3269 * 1. There's more than one ref.
3270 * 2. There's a SHARED_DATA_REF_KEY set.
3271 * 3. FULL_BACKREF is set on the flags.
3272 *
3273 * Otherwise it's safe to assume that the ref offset == the
3274 * owner of this block, so we can use that when calling
3275 * read_tree_block.
3276 */
3277 if (btrfs_extent_refs(eb, ei) == 1 &&
3278 !(btrfs_extent_flags(eb, ei) &
3279 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3280 ptr < end) {
3281 struct btrfs_extent_inline_ref *iref;
3282 int type;
3283
3284 iref = (struct btrfs_extent_inline_ref *)ptr;
3285 type = btrfs_get_extent_inline_ref_type(eb, iref,
3286 BTRFS_REF_TYPE_BLOCK);
3287 if (type == BTRFS_REF_TYPE_INVALID)
3288 return -EINVAL;
3289 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3290 owner = btrfs_extent_inline_ref_offset(eb, iref);
3291 }
3292 } else {
3293 btrfs_print_leaf(eb);
3294 btrfs_err(rc->block_group->fs_info,
3295 "unrecognized tree backref at tree block %llu slot %u",
3296 eb->start, path->slots[0]);
3297 btrfs_release_path(path);
3298 return -EUCLEAN;
3299 }
3300
3301 btrfs_release_path(path);
3302
3303 BUG_ON(level == -1);
3304
3305 block = kmalloc(sizeof(*block), GFP_NOFS);
3306 if (!block)
3307 return -ENOMEM;
3308
3309 block->bytenr = extent_key->objectid;
3310 block->key.objectid = rc->extent_root->fs_info->nodesize;
3311 block->key.offset = generation;
3312 block->level = level;
3313 block->key_ready = false;
3314 block->owner = owner;
3315
3316 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3317 if (rb_node)
3318 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3319 -EEXIST);
3320
3321 return 0;
3322}
3323
3324/*
3325 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3326 */
3327static int __add_tree_block(struct reloc_control *rc,
3328 u64 bytenr, u32 blocksize,
3329 struct rb_root *blocks)
3330{
3331 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3332 struct btrfs_path *path;
3333 struct btrfs_key key;
3334 int ret;
3335 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3336
3337 if (tree_block_processed(bytenr, rc))
3338 return 0;
3339
3340 if (rb_simple_search(blocks, bytenr))
3341 return 0;
3342
3343 path = btrfs_alloc_path();
3344 if (!path)
3345 return -ENOMEM;
3346again:
3347 key.objectid = bytenr;
3348 if (skinny) {
3349 key.type = BTRFS_METADATA_ITEM_KEY;
3350 key.offset = (u64)-1;
3351 } else {
3352 key.type = BTRFS_EXTENT_ITEM_KEY;
3353 key.offset = blocksize;
3354 }
3355
3356 path->search_commit_root = 1;
3357 path->skip_locking = 1;
3358 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3359 if (ret < 0)
3360 goto out;
3361
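	/*
	 * With skinny metadata we searched for (bytenr, METADATA_ITEM, -1),
	 * which lands one slot past the item if it exists.  Step back and
	 * accept either key format; if nothing matches, fall back to the
	 * old EXTENT_ITEM key and search again.
	 */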
3362 if (ret > 0 && skinny) {
3363 if (path->slots[0]) {
3364 path->slots[0]--;
3365 btrfs_item_key_to_cpu(path->nodes[0], &key,
3366 path->slots[0]);
3367 if (key.objectid == bytenr &&
3368 (key.type == BTRFS_METADATA_ITEM_KEY ||
3369 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3370 key.offset == blocksize)))
3371 ret = 0;
3372 }
3373
3374 if (ret) {
3375 skinny = false;
3376 btrfs_release_path(path);
3377 goto again;
3378 }
3379 }
3380 if (ret) {
3381 ASSERT(ret == 1);
3382 btrfs_print_leaf(path->nodes[0]);
3383 btrfs_err(fs_info,
3384 "tree block extent item (%llu) is not found in extent tree",
3385 bytenr);
3386 WARN_ON(1);
3387 ret = -EINVAL;
3388 goto out;
3389 }
3390
3391 ret = add_tree_block(rc, &key, path, blocks);
3392out:
3393 btrfs_free_path(path);
3394 return ret;
3395}
3396
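/*
 * Truncate the free space cache inode of @block_group so that its data
 * extents stop pinning space inside the group being relocated.  The caller
 * may pass the inode directly; otherwise it is looked up by @ino in the
 * tree root.
 */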
3397static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3398 struct btrfs_block_group *block_group,
3399 struct inode *inode,
3400 u64 ino)
3401{
3402 struct btrfs_root *root = fs_info->tree_root;
3403 struct btrfs_trans_handle *trans;
3404 int ret = 0;
3405
3406 if (inode)
3407 goto truncate;
3408
3409 inode = btrfs_iget(fs_info->sb, ino, root);
3410 if (IS_ERR(inode))
3411 return -ENOENT;
3412
3413truncate:
3414 ret = btrfs_check_trunc_cache_free_space(fs_info,
3415 &fs_info->global_block_rsv);
3416 if (ret)
3417 goto out;
3418
3419 trans = btrfs_join_transaction(root);
3420 if (IS_ERR(trans)) {
3421 ret = PTR_ERR(trans);
3422 goto out;
3423 }
3424
3425 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3426
3427 btrfs_end_transaction(trans);
3428 btrfs_btree_balance_dirty(fs_info);
3429out:
3430 iput(inode);
3431 return ret;
3432}
3433
3434/*
3435 * Locate the free space cache EXTENT_DATA item in the root tree leaf and delete
3436 * the cache inode, so that free space cache data extents don't block data relocation.
3437 */
3438static int delete_v1_space_cache(struct extent_buffer *leaf,
3439 struct btrfs_block_group *block_group,
3440 u64 data_bytenr)
3441{
3442 u64 space_cache_ino;
3443 struct btrfs_file_extent_item *ei;
3444 struct btrfs_key key;
3445 bool found = false;
3446 int i;
3447 int ret;
3448
3449 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3450 return 0;
3451
3452 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3453 u8 type;
3454
3455 btrfs_item_key_to_cpu(leaf, &key, i);
3456 if (key.type != BTRFS_EXTENT_DATA_KEY)
3457 continue;
3458 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3459 type = btrfs_file_extent_type(leaf, ei);
3460
3461 if ((type == BTRFS_FILE_EXTENT_REG ||
3462 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3463 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3464 found = true;
3465 space_cache_ino = key.objectid;
3466 break;
3467 }
3468 }
3469 if (!found)
3470 return -ENOENT;
3471 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3472 space_cache_ino);
3473 return ret;
3474}
3475
3476/*
3477 * helper to find all tree blocks that reference a given data extent
3478 */
3479static noinline_for_stack int add_data_references(struct reloc_control *rc,
3480 const struct btrfs_key *extent_key,
3481 struct btrfs_path *path,
3482 struct rb_root *blocks)
3483{
3484 struct btrfs_backref_walk_ctx ctx = { 0 };
3485 struct ulist_iterator leaf_uiter;
3486 struct ulist_node *ref_node = NULL;
3487 const u32 blocksize = rc->extent_root->fs_info->nodesize;
3488 int ret = 0;
3489
3490 btrfs_release_path(path);
3491
3492 ctx.bytenr = extent_key->objectid;
3493 ctx.skip_inode_ref_list = true;
3494 ctx.fs_info = rc->extent_root->fs_info;
3495
3496 ret = btrfs_find_all_leafs(&ctx);
3497 if (ret < 0)
3498 return ret;
3499
3500 ULIST_ITER_INIT(&leaf_uiter);
3501 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3502 struct btrfs_tree_parent_check check = { 0 };
3503 struct extent_buffer *eb;
3504
3505 eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3506 if (IS_ERR(eb)) {
3507 ret = PTR_ERR(eb);
3508 break;
3509 }
3510 ret = delete_v1_space_cache(eb, rc->block_group,
3511 extent_key->objectid);
3512 free_extent_buffer(eb);
3513 if (ret < 0)
3514 break;
3515 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3516 if (ret < 0)
3517 break;
3518 }
3519 if (ret < 0)
3520 free_block_list(blocks);
3521 ulist_free(ctx.refs);
3522 return ret;
3523}
3524
3525/*
3526 * helper to find next unprocessed extent
3527 */
3528static noinline_for_stack
3529int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3530 struct btrfs_key *extent_key)
3531{
3532 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3533 struct btrfs_key key;
3534 struct extent_buffer *leaf;
3535 u64 start, end, last;
3536 int ret;
3537
3538 last = rc->block_group->start + rc->block_group->length;
3539 while (1) {
3540 bool block_found;
3541
3542 cond_resched();
3543 if (rc->search_start >= last) {
3544 ret = 1;
3545 break;
3546 }
3547
3548 key.objectid = rc->search_start;
3549 key.type = BTRFS_EXTENT_ITEM_KEY;
3550 key.offset = 0;
3551
3552 path->search_commit_root = 1;
3553 path->skip_locking = 1;
3554 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3555 0, 0);
3556 if (ret < 0)
3557 break;
3558next:
3559 leaf = path->nodes[0];
3560 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3561 ret = btrfs_next_leaf(rc->extent_root, path);
3562 if (ret != 0)
3563 break;
3564 leaf = path->nodes[0];
3565 }
3566
3567 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3568 if (key.objectid >= last) {
3569 ret = 1;
3570 break;
3571 }
3572
3573 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3574 key.type != BTRFS_METADATA_ITEM_KEY) {
3575 path->slots[0]++;
3576 goto next;
3577 }
3578
3579 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3580 key.objectid + key.offset <= rc->search_start) {
3581 path->slots[0]++;
3582 goto next;
3583 }
3584
3585 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3586 key.objectid + fs_info->nodesize <=
3587 rc->search_start) {
3588 path->slots[0]++;
3589 goto next;
3590 }
3591
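		/* Skip ranges that were already relocated in this pass. */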
3592 block_found = find_first_extent_bit(&rc->processed_blocks,
3593 key.objectid, &start, &end,
3594 EXTENT_DIRTY, NULL);
3595
3596 if (block_found && start <= key.objectid) {
3597 btrfs_release_path(path);
3598 rc->search_start = end + 1;
3599 } else {
3600 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3601 rc->search_start = key.objectid + key.offset;
3602 else
3603 rc->search_start = key.objectid +
3604 fs_info->nodesize;
3605 memcpy(extent_key, &key, sizeof(key));
3606 return 0;
3607 }
3608 }
3609 btrfs_release_path(path);
3610 return ret;
3611}
3612
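/*
 * Publish/clear the reloc control in fs_info->reloc_ctl under reloc_mutex,
 * so transaction commit and the COW hooks can tell a relocation is running.
 */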
3613static void set_reloc_control(struct reloc_control *rc)
3614{
3615 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3616
3617 mutex_lock(&fs_info->reloc_mutex);
3618 fs_info->reloc_ctl = rc;
3619 mutex_unlock(&fs_info->reloc_mutex);
3620}
3621
3622static void unset_reloc_control(struct reloc_control *rc)
3623{
3624 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3625
3626 mutex_lock(&fs_info->reloc_mutex);
3627 fs_info->reloc_ctl = NULL;
3628 mutex_unlock(&fs_info->reloc_mutex);
3629}
3630
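/*
 * Set up state for one relocation pass: allocate a temporary block rsv sized
 * for RELOCATION_RESERVED_NODES tree blocks, enable reloc tree creation and
 * commit the running transaction so later tree modifications start out with
 * the reloc control visible.
 */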
3631static noinline_for_stack
3632int prepare_to_relocate(struct reloc_control *rc)
3633{
3634 struct btrfs_trans_handle *trans;
3635 int ret;
3636
3637 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3638 BTRFS_BLOCK_RSV_TEMP);
3639 if (!rc->block_rsv)
3640 return -ENOMEM;
3641
3642 memset(&rc->cluster, 0, sizeof(rc->cluster));
3643 rc->search_start = rc->block_group->start;
3644 rc->extents_found = 0;
3645 rc->nodes_relocated = 0;
3646 rc->merging_rsv_size = 0;
3647 rc->reserved_bytes = 0;
3648 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3649 RELOCATION_RESERVED_NODES;
3650 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3651 rc->block_rsv, rc->block_rsv->size,
3652 BTRFS_RESERVE_FLUSH_ALL);
3653 if (ret)
3654 return ret;
3655
3656 rc->create_reloc_tree = true;
3657 set_reloc_control(rc);
3658
3659 trans = btrfs_join_transaction(rc->extent_root);
3660 if (IS_ERR(trans)) {
3661 unset_reloc_control(rc);
3662 /*
3663 * The extent tree is not a ref_cow tree, so it has no reloc_root
3664 * to clean up. And callers are responsible for freeing the above
3665 * block rsv.
3666 */
3667 return PTR_ERR(trans);
3668 }
3669
3670 ret = btrfs_commit_transaction(trans);
3671 if (ret)
3672 unset_reloc_control(rc);
3673
3674 return ret;
3675}
3676
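/*
 * The main loop of relocation: find the next unprocessed extent in the block
 * group, collect the tree blocks to relocate (for data extents, the leaves
 * referencing them) and relocate them, clustering data extents along the way.
 * Finally merge the reloc trees back into the fs trees.
 */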
3677static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3678{
3679 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3680 struct rb_root blocks = RB_ROOT;
3681 struct btrfs_key key;
3682 struct btrfs_trans_handle *trans = NULL;
3683 struct btrfs_path *path;
3684 struct btrfs_extent_item *ei;
3685 u64 flags;
3686 int ret;
3687 int err = 0;
3688 int progress = 0;
3689
3690 path = btrfs_alloc_path();
3691 if (!path)
3692 return -ENOMEM;
3693 path->reada = READA_FORWARD;
3694
3695 ret = prepare_to_relocate(rc);
3696 if (ret) {
3697 err = ret;
3698 goto out_free;
3699 }
3700
3701 while (1) {
3702 rc->reserved_bytes = 0;
3703 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3704 rc->block_rsv->size,
3705 BTRFS_RESERVE_FLUSH_ALL);
3706 if (ret) {
3707 err = ret;
3708 break;
3709 }
3710 progress++;
3711 trans = btrfs_start_transaction(rc->extent_root, 0);
3712 if (IS_ERR(trans)) {
3713 err = PTR_ERR(trans);
3714 trans = NULL;
3715 break;
3716 }
3717restart:
3718 if (update_backref_cache(trans, &rc->backref_cache)) {
3719 btrfs_end_transaction(trans);
3720 trans = NULL;
3721 continue;
3722 }
3723
3724 ret = find_next_extent(rc, path, &key);
3725 if (ret < 0)
3726 err = ret;
3727 if (ret != 0)
3728 break;
3729
3730 rc->extents_found++;
3731
3732 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3733 struct btrfs_extent_item);
3734 flags = btrfs_extent_flags(path->nodes[0], ei);
3735
3736 /*
3737 * If we are relocating a simple quota owned extent item, we
3738 * need to note the owner on the reloc data root so that when
3739 * we allocate the replacement item, we can attribute it to the
3740 * correct eventual owner (rather than the reloc data root).
3741 */
3742 if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
3743 struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
3744 u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
3745 path->nodes[0],
3746 path->slots[0]);
3747
3748 root->relocation_src_root = owning_root_id;
3749 }
3750
3751 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3752 ret = add_tree_block(rc, &key, path, &blocks);
3753 } else if (rc->stage == UPDATE_DATA_PTRS &&
3754 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3755 ret = add_data_references(rc, &key, path, &blocks);
3756 } else {
3757 btrfs_release_path(path);
3758 ret = 0;
3759 }
3760 if (ret < 0) {
3761 err = ret;
3762 break;
3763 }
3764
3765 if (!RB_EMPTY_ROOT(&blocks)) {
3766 ret = relocate_tree_blocks(trans, rc, &blocks);
3767 if (ret < 0) {
3768 if (ret != -EAGAIN) {
3769 err = ret;
3770 break;
3771 }
3772 rc->extents_found--;
3773 rc->search_start = key.objectid;
3774 }
3775 }
3776
3777 btrfs_end_transaction_throttle(trans);
3778 btrfs_btree_balance_dirty(fs_info);
3779 trans = NULL;
3780
3781 if (rc->stage == MOVE_DATA_EXTENTS &&
3782 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3783 rc->found_file_extent = true;
3784 ret = relocate_data_extent(rc->data_inode,
3785 &key, &rc->cluster);
3786 if (ret < 0) {
3787 err = ret;
3788 break;
3789 }
3790 }
3791 if (btrfs_should_cancel_balance(fs_info)) {
3792 err = -ECANCELED;
3793 break;
3794 }
3795 }
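	/*
	 * On ENOSPC after some progress, try to force allocation of a new
	 * chunk of the same type as the block group, and restart the loop
	 * with the transaction still open.
	 */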
3796 if (trans && progress && err == -ENOSPC) {
3797 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3798 if (ret == 1) {
3799 err = 0;
3800 progress = 0;
3801 goto restart;
3802 }
3803 }
3804
3805 btrfs_release_path(path);
3806 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3807
3808 if (trans) {
3809 btrfs_end_transaction_throttle(trans);
3810 btrfs_btree_balance_dirty(fs_info);
3811 }
3812
3813 if (!err) {
3814 ret = relocate_file_extent_cluster(rc->data_inode,
3815 &rc->cluster);
3816 if (ret < 0)
3817 err = ret;
3818 }
3819
3820 rc->create_reloc_tree = false;
3821 set_reloc_control(rc);
3822
3823 btrfs_backref_release_cache(&rc->backref_cache);
3824 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3825
3826 /*
3827 * Even in the case when the relocation is cancelled, we should still go
3828 * through prepare_to_merge() and merge_reloc_roots().
3829 *
3830 * On error (including a cancelled balance), prepare_to_merge() will
3831 * mark all reloc trees orphan, then queue them for cleanup in
3832 * merge_reloc_roots().
3833 */
3834 err = prepare_to_merge(rc, err);
3835
3836 merge_reloc_roots(rc);
3837
3838 rc->merge_reloc_tree = false;
3839 unset_reloc_control(rc);
3840 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3841
3842 /* get rid of pinned extents */
3843 trans = btrfs_join_transaction(rc->extent_root);
3844 if (IS_ERR(trans)) {
3845 err = PTR_ERR(trans);
3846 goto out_free;
3847 }
3848 ret = btrfs_commit_transaction(trans);
3849 if (ret && !err)
3850 err = ret;
3851out_free:
3852 ret = clean_dirty_subvols(rc);
3853 if (ret < 0 && !err)
3854 err = ret;
3855 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3856 btrfs_free_path(path);
3857 return err;
3858}
3859
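/*
 * Insert a bare inode item for the data relocation inode: a regular file
 * with zero link count (an orphan), marked NOCOMPRESS and PREALLOC so the
 * relocated data is written out as preallocated extents.
 */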
3860static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3861 struct btrfs_root *root, u64 objectid)
3862{
3863 struct btrfs_path *path;
3864 struct btrfs_inode_item *item;
3865 struct extent_buffer *leaf;
3866 int ret;
3867
3868 path = btrfs_alloc_path();
3869 if (!path)
3870 return -ENOMEM;
3871
3872 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3873 if (ret)
3874 goto out;
3875
3876 leaf = path->nodes[0];
3877 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3878 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3879 btrfs_set_inode_generation(leaf, item, 1);
3880 btrfs_set_inode_size(leaf, item, 0);
3881 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3882 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3883 BTRFS_INODE_PREALLOC);
3884 btrfs_mark_buffer_dirty(trans, leaf);
3885out:
3886 btrfs_free_path(path);
3887 return ret;
3888}
3889
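/*
 * Remove the inode item again; used by create_reloc_inode() when the inode
 * cannot be instantiated after insertion.
 */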
3890static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3891 struct btrfs_root *root, u64 objectid)
3892{
3893 struct btrfs_path *path;
3894 struct btrfs_key key;
3895 int ret = 0;
3896
3897 path = btrfs_alloc_path();
3898 if (!path) {
3899 ret = -ENOMEM;
3900 goto out;
3901 }
3902
3903 key.objectid = objectid;
3904 key.type = BTRFS_INODE_ITEM_KEY;
3905 key.offset = 0;
3906 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3907 if (ret) {
3908 if (ret > 0)
3909 ret = -ENOENT;
3910 goto out;
3911 }
3912 ret = btrfs_del_item(trans, root, path);
3913out:
3914 if (ret)
3915 btrfs_abort_transaction(trans, ret);
3916 btrfs_free_path(path);
3917}
3918
3919/*
3920 * helper to create an inode for data relocation.
3921 * the inode is in the data relocation tree and its link count is 0
3922 */
3923static noinline_for_stack struct inode *create_reloc_inode(
3924 struct btrfs_fs_info *fs_info,
3925 const struct btrfs_block_group *group)
3926{
3927 struct inode *inode = NULL;
3928 struct btrfs_trans_handle *trans;
3929 struct btrfs_root *root;
3930 u64 objectid;
3931 int err = 0;
3932
3933 root = btrfs_grab_root(fs_info->data_reloc_root);
3934 trans = btrfs_start_transaction(root, 6);
3935 if (IS_ERR(trans)) {
3936 btrfs_put_root(root);
3937 return ERR_CAST(trans);
3938 }
3939
3940 err = btrfs_get_free_objectid(root, &objectid);
3941 if (err)
3942 goto out;
3943
3944 err = __insert_orphan_inode(trans, root, objectid);
3945 if (err)
3946 goto out;
3947
3948 inode = btrfs_iget(fs_info->sb, objectid, root);
3949 if (IS_ERR(inode)) {
3950 delete_orphan_inode(trans, root, objectid);
3951 err = PTR_ERR(inode);
3952 inode = NULL;
3953 goto out;
3954 }
3955 BTRFS_I(inode)->index_cnt = group->start;
3956
3957 err = btrfs_orphan_add(trans, BTRFS_I(inode));
3958out:
3959 btrfs_put_root(root);
3960 btrfs_end_transaction(trans);
3961 btrfs_btree_balance_dirty(fs_info);
3962 if (err) {
3963 iput(inode);
3964 inode = ERR_PTR(err);
3965 }
3966 return inode;
3967}
3968
3969/*
3970 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3971 * has been requested meanwhile and don't start in that case.
3972 *
3973 * Return:
3974 * 0 success
3975 * -EINPROGRESS operation is already in progress, that's probably a bug
3976 * -ECANCELED cancellation request was set before the operation started
3977 */
3978static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3979{
3980 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3981 /* This should not happen */
3982 btrfs_err(fs_info, "reloc already running, cannot start");
3983 return -EINPROGRESS;
3984 }
3985
3986 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3987 btrfs_info(fs_info, "chunk relocation canceled on start");
3988 /*
3989 * On cancel, clear all requests but let the caller mark
3990 * the end after cleanup operations.
3991 */
3992 atomic_set(&fs_info->reloc_cancel_req, 0);
3993 return -ECANCELED;
3994 }
3995 return 0;
3996}
3997
3998/*
3999 * Mark end of chunk relocation that is cancellable and wake any waiters.
4000 */
4001static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
4002{
4003 /* Requested after start, clear bit first so any waiters can continue */
4004 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
4005 btrfs_info(fs_info, "chunk relocation canceled during operation");
4006 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
4007 atomic_set(&fs_info->reloc_cancel_req, 0);
4008}
4009
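/*
 * Allocate a reloc control and initialize its root lists, backref cache,
 * reloc root mapping tree and the io tree tracking processed blocks.
 */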
4010static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
4011{
4012 struct reloc_control *rc;
4013
4014 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4015 if (!rc)
4016 return NULL;
4017
4018 INIT_LIST_HEAD(&rc->reloc_roots);
4019 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
4020 btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
4021 rc->reloc_root_tree.rb_root = RB_ROOT;
4022 spin_lock_init(&rc->reloc_root_tree.lock);
4023 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
4024 return rc;
4025}
4026
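/*
 * Free a reloc control together with any remaining reloc roots and reloc
 * root mapping nodes.
 */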
4027static void free_reloc_control(struct reloc_control *rc)
4028{
4029 struct mapping_node *node, *tmp;
4030
4031 free_reloc_roots(&rc->reloc_roots);
4032 rbtree_postorder_for_each_entry_safe(node, tmp,
4033 &rc->reloc_root_tree.rb_root, rb_node)
4034 kfree(node);
4035
4036 kfree(rc);
4037}
4038
4039/*
4040 * Print the block group being relocated
4041 */
4042static void describe_relocation(struct btrfs_fs_info *fs_info,
4043 struct btrfs_block_group *block_group)
4044{
4045 char buf[128] = {'\0'};
4046
4047 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
4048
4049 btrfs_info(fs_info,
4050 "relocating block group %llu flags %s",
4051 block_group->start, buf);
4052}
4053
4054static const char *stage_to_string(enum reloc_stage stage)
4055{
4056 if (stage == MOVE_DATA_EXTENTS)
4057 return "move data extents";
4058 if (stage == UPDATE_DATA_PTRS)
4059 return "update data pointers";
4060 return "unknown";
4061}
4062
4063/*
4064 * function to relocate all extents in a block group.
4065 */
4066int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
4067{
4068 struct btrfs_block_group *bg;
4069 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
4070 struct reloc_control *rc;
4071 struct inode *inode;
4072 struct btrfs_path *path;
4073 int ret;
4074 int rw = 0;
4075 int err = 0;
4076
4077 /*
4078 * This only gets set if we had a half-deleted snapshot on mount. We
4079 * cannot allow relocation to start while we're still trying to clean up
4080 * these pending deletions.
4081 */
4082 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
4083 if (ret)
4084 return ret;
4085
4086 /* We may have been woken up by close_ctree, so bail if we're closing. */
4087 if (btrfs_fs_closing(fs_info))
4088 return -EINTR;
4089
4090 bg = btrfs_lookup_block_group(fs_info, group_start);
4091 if (!bg)
4092 return -ENOENT;
4093
4094 /*
4095 * Relocation of a data block group creates ordered extents. Without
4096 * sb_start_write(), we can freeze the filesystem while unfinished
4097 * ordered extents are left. Such ordered extents can cause a deadlock
4098 * e.g. when syncfs() is waiting for their completion but they can't
4099 * finish because they block when joining a transaction, due to the
4100 * fact that the freeze locks are being held in write mode.
4101 */
4102 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4103 ASSERT(sb_write_started(fs_info->sb));
4104
4105 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4106 btrfs_put_block_group(bg);
4107 return -ETXTBSY;
4108 }
4109
4110 rc = alloc_reloc_control(fs_info);
4111 if (!rc) {
4112 btrfs_put_block_group(bg);
4113 return -ENOMEM;
4114 }
4115
4116 ret = reloc_chunk_start(fs_info);
4117 if (ret < 0) {
4118 err = ret;
4119 goto out_put_bg;
4120 }
4121
4122 rc->extent_root = extent_root;
4123 rc->block_group = bg;
4124
4125 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4126 if (ret) {
4127 err = ret;
4128 goto out;
4129 }
4130 rw = 1;
4131
4132 path = btrfs_alloc_path();
4133 if (!path) {
4134 err = -ENOMEM;
4135 goto out;
4136 }
4137
4138 inode = lookup_free_space_inode(rc->block_group, path);
4139 btrfs_free_path(path);
4140
4141 if (!IS_ERR(inode))
4142 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4143 else
4144 ret = PTR_ERR(inode);
4145
4146 if (ret && ret != -ENOENT) {
4147 err = ret;
4148 goto out;
4149 }
4150
4151 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4152 if (IS_ERR(rc->data_inode)) {
4153 err = PTR_ERR(rc->data_inode);
4154 rc->data_inode = NULL;
4155 goto out;
4156 }
4157
4158 describe_relocation(fs_info, rc->block_group);
4159
4160 btrfs_wait_block_group_reservations(rc->block_group);
4161 btrfs_wait_nocow_writers(rc->block_group);
4162 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4163 rc->block_group->start,
4164 rc->block_group->length);
4165
4166 ret = btrfs_zone_finish(rc->block_group);
4167 WARN_ON(ret && ret != -EAGAIN);
4168
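	/*
	 * Run the relocation stages: MOVE_DATA_EXTENTS copies data out of the
	 * block group, then UPDATE_DATA_PTRS rewrites the metadata referring
	 * to it.  Loop until a pass finds no more extents.
	 */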
4169 while (1) {
4170 enum reloc_stage finishes_stage;
4171
4172 mutex_lock(&fs_info->cleaner_mutex);
4173 ret = relocate_block_group(rc);
4174 mutex_unlock(&fs_info->cleaner_mutex);
4175 if (ret < 0)
4176 err = ret;
4177
4178 finishes_stage = rc->stage;
4179 /*
4180 * We may have gotten ENOSPC after we already dirtied some
4181 * extents. If writeout happens while we're relocating a
4182 * different block group we could end up hitting the
4183 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4184 * btrfs_reloc_cow_block. Make sure we write everything out
4185 * properly so we don't trip over this problem, and then break
4186 * out of the loop if we hit an error.
4187 */
4188 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4189 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4190 (u64)-1);
4191 if (ret)
4192 err = ret;
4193 invalidate_mapping_pages(rc->data_inode->i_mapping,
4194 0, -1);
4195 rc->stage = UPDATE_DATA_PTRS;
4196 }
4197
4198 if (err < 0)
4199 goto out;
4200
4201 if (rc->extents_found == 0)
4202 break;
4203
4204 btrfs_info(fs_info, "found %llu extents, stage: %s",
4205 rc->extents_found, stage_to_string(finishes_stage));
4206 }
4207
4208 WARN_ON(rc->block_group->pinned > 0);
4209 WARN_ON(rc->block_group->reserved > 0);
4210 WARN_ON(rc->block_group->used > 0);
4211out:
4212 if (err && rw)
4213 btrfs_dec_block_group_ro(rc->block_group);
4214 iput(rc->data_inode);
4215out_put_bg:
4216 btrfs_put_block_group(bg);
4217 reloc_chunk_end(fs_info);
4218 free_reloc_control(rc);
4219 return err;
4220}
4221
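/*
 * Zero the refs and drop progress in the root item of an unreferenced reloc
 * root found during recovery, so it is cleaned up as a garbage root.
 */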
4222static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4223{
4224 struct btrfs_fs_info *fs_info = root->fs_info;
4225 struct btrfs_trans_handle *trans;
4226 int ret, err;
4227
4228 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4229 if (IS_ERR(trans))
4230 return PTR_ERR(trans);
4231
4232 memset(&root->root_item.drop_progress, 0,
4233 sizeof(root->root_item.drop_progress));
4234 btrfs_set_root_drop_level(&root->root_item, 0);
4235 btrfs_set_root_refs(&root->root_item, 0);
4236 ret = btrfs_update_root(trans, fs_info->tree_root,
4237 &root->root_key, &root->root_item);
4238
4239 err = btrfs_end_transaction(trans);
4240 if (err)
4241 return err;
4242 return ret;
4243}
4244
4245/*
4246 * recover relocation interrupted by a system crash.
4247 *
4248 * this function resumes merging reloc trees with the corresponding fs trees.
4249 * this is important for preserving the sharing of tree blocks.
4250 */
4251int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4252{
4253 LIST_HEAD(reloc_roots);
4254 struct btrfs_key key;
4255 struct btrfs_root *fs_root;
4256 struct btrfs_root *reloc_root;
4257 struct btrfs_path *path;
4258 struct extent_buffer *leaf;
4259 struct reloc_control *rc = NULL;
4260 struct btrfs_trans_handle *trans;
4261 int ret;
4262 int err = 0;
4263
4264 path = btrfs_alloc_path();
4265 if (!path)
4266 return -ENOMEM;
4267 path->reada = READA_BACK;
4268
4269 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4270 key.type = BTRFS_ROOT_ITEM_KEY;
4271 key.offset = (u64)-1;
4272
4273 while (1) {
4274 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4275 path, 0, 0);
4276 if (ret < 0) {
4277 err = ret;
4278 goto out;
4279 }
4280 if (ret > 0) {
4281 if (path->slots[0] == 0)
4282 break;
4283 path->slots[0]--;
4284 }
4285 leaf = path->nodes[0];
4286 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4287 btrfs_release_path(path);
4288
4289 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4290 key.type != BTRFS_ROOT_ITEM_KEY)
4291 break;
4292
4293 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4294 if (IS_ERR(reloc_root)) {
4295 err = PTR_ERR(reloc_root);
4296 goto out;
4297 }
4298
4299 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4300 list_add(&reloc_root->root_list, &reloc_roots);
4301
4302 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4303 fs_root = btrfs_get_fs_root(fs_info,
4304 reloc_root->root_key.offset, false);
4305 if (IS_ERR(fs_root)) {
4306 ret = PTR_ERR(fs_root);
4307 if (ret != -ENOENT) {
4308 err = ret;
4309 goto out;
4310 }
4311 ret = mark_garbage_root(reloc_root);
4312 if (ret < 0) {
4313 err = ret;
4314 goto out;
4315 }
4316 } else {
4317 btrfs_put_root(fs_root);
4318 }
4319 }
4320
4321 if (key.offset == 0)
4322 break;
4323
4324 key.offset--;
4325 }
4326 btrfs_release_path(path);
4327
4328 if (list_empty(&reloc_roots))
4329 goto out;
4330
4331 rc = alloc_reloc_control(fs_info);
4332 if (!rc) {
4333 err = -ENOMEM;
4334 goto out;
4335 }
4336
4337 ret = reloc_chunk_start(fs_info);
4338 if (ret < 0) {
4339 err = ret;
4340 goto out_end;
4341 }
4342
4343 rc->extent_root = btrfs_extent_root(fs_info, 0);
4344
4345 set_reloc_control(rc);
4346
4347 trans = btrfs_join_transaction(rc->extent_root);
4348 if (IS_ERR(trans)) {
4349 err = PTR_ERR(trans);
4350 goto out_unset;
4351 }
4352
4353 rc->merge_reloc_tree = true;
4354
4355 while (!list_empty(&reloc_roots)) {
4356 reloc_root = list_entry(reloc_roots.next,
4357 struct btrfs_root, root_list);
4358 list_del(&reloc_root->root_list);
4359
4360 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4361 list_add_tail(&reloc_root->root_list,
4362 &rc->reloc_roots);
4363 continue;
4364 }
4365
4366 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4367 false);
4368 if (IS_ERR(fs_root)) {
4369 err = PTR_ERR(fs_root);
4370 list_add_tail(&reloc_root->root_list, &reloc_roots);
4371 btrfs_end_transaction(trans);
4372 goto out_unset;
4373 }
4374
4375 err = __add_reloc_root(reloc_root);
4376 ASSERT(err != -EEXIST);
4377 if (err) {
4378 list_add_tail(&reloc_root->root_list, &reloc_roots);
4379 btrfs_put_root(fs_root);
4380 btrfs_end_transaction(trans);
4381 goto out_unset;
4382 }
4383 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4384 btrfs_put_root(fs_root);
4385 }
4386
4387 err = btrfs_commit_transaction(trans);
4388 if (err)
4389 goto out_unset;
4390
4391 merge_reloc_roots(rc);
4392
4393 unset_reloc_control(rc);
4394
4395 trans = btrfs_join_transaction(rc->extent_root);
4396 if (IS_ERR(trans)) {
4397 err = PTR_ERR(trans);
4398 goto out_clean;
4399 }
4400 err = btrfs_commit_transaction(trans);
4401out_clean:
4402 ret = clean_dirty_subvols(rc);
4403 if (ret < 0 && !err)
4404 err = ret;
4405out_unset:
4406 unset_reloc_control(rc);
4407out_end:
4408 reloc_chunk_end(fs_info);
4409 free_reloc_control(rc);
4410out:
4411 free_reloc_roots(&reloc_roots);
4412
4413 btrfs_free_path(path);
4414
4415 if (err == 0) {
4416 /* clean up orphan inodes in the data relocation tree */
4417 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4418 ASSERT(fs_root);
4419 err = btrfs_orphan_cleanup(fs_root);
4420 btrfs_put_root(fs_root);
4421 }
4422 return err;
4423}
4424
4425/*
4426 * helper to add ordered checksums for data relocation.
4427 *
4428 * cloning the existing checksums properly handles nodatasum extents and
4429 * saves the CPU time of recalculating them.
4430 */
4431int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
4432{
4433 struct btrfs_inode *inode = BTRFS_I(ordered->inode);
4434 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4435 u64 disk_bytenr = ordered->file_offset + inode->index_cnt;
4436 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4437 LIST_HEAD(list);
4438 int ret;
4439
4440 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4441 disk_bytenr + ordered->num_bytes - 1,
4442 &list, 0, false);
4443 if (ret)
4444 return ret;
4445
4446 while (!list_empty(&list)) {
4447 struct btrfs_ordered_sum *sums =
4448 list_entry(list.next, struct btrfs_ordered_sum, list);
4449
4450 list_del_init(&sums->list);
4451
4452 /*
4453 * We need to offset the new_bytenr based on where the csum is,
4454 * because we read in entire prealloc extents but may have written
4455 * to, say, the middle of a prealloc extent, and the csum has to
4456 * go with the right disk offset.
4457 *
4458 * We can do this because the data reloc inode refers strictly
4459 * to the on-disk bytes, so unlike with real inodes we don't have
4460 * to worry about disk_len vs real len: it's all disk length.
4463 */
4464 sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
4465 btrfs_add_ordered_sum(ordered, sums);
4466 }
4467
4468 return 0;
4469}
4470
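/*
 * Hook called whenever a tree block is COWed.  For reloc tree blocks, point
 * the backref cache node at the new extent buffer and keep it on the pending
 * list until upper level pointers are updated.  On the first COW of a leaf
 * during the UPDATE_DATA_PTRS stage, rewrite its file extent disk bytenrs.
 */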
4471int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4472 struct btrfs_root *root,
4473 const struct extent_buffer *buf,
4474 struct extent_buffer *cow)
4475{
4476 struct btrfs_fs_info *fs_info = root->fs_info;
4477 struct reloc_control *rc;
4478 struct btrfs_backref_node *node;
4479 int first_cow = 0;
4480 int level;
4481 int ret = 0;
4482
4483 rc = fs_info->reloc_ctl;
4484 if (!rc)
4485 return 0;
4486
4487 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4488
4489 level = btrfs_header_level(buf);
4490 if (btrfs_header_generation(buf) <=
4491 btrfs_root_last_snapshot(&root->root_item))
4492 first_cow = 1;
4493
4494 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4495 rc->create_reloc_tree) {
4496 WARN_ON(!first_cow && level == 0);
4497
4498 node = rc->backref_cache.path[level];
4499 BUG_ON(node->bytenr != buf->start &&
4500 node->new_bytenr != buf->start);
4501
4502 btrfs_backref_drop_node_buffer(node);
4503 atomic_inc(&cow->refs);
4504 node->eb = cow;
4505 node->new_bytenr = cow->start;
4506
4507 if (!node->pending) {
4508 list_move_tail(&node->list,
4509 &rc->backref_cache.pending[level]);
4510 node->pending = 1;
4511 }
4512
4513 if (first_cow)
4514 mark_block_processed(rc, node);
4515
4516 if (first_cow && level > 0)
4517 rc->nodes_relocated += buf->len;
4518 }
4519
4520 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4521 ret = replace_file_extents(trans, rc, root, cow);
4522 return ret;
4523}
4524
4525/*
4526 * called before creating snapshot. it calculates metadata reservation
4527 * required for relocating tree blocks in the snapshot
4528 */
4529void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4530 u64 *bytes_to_reserve)
4531{
4532 struct btrfs_root *root = pending->root;
4533 struct reloc_control *rc = root->fs_info->reloc_ctl;
4534
4535 if (!rc || !have_reloc_root(root))
4536 return;
4537
4538 if (!rc->merge_reloc_tree)
4539 return;
4540
4541 root = root->reloc_root;
4542 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4543 /*
4544 * relocation is in the stage of merging trees. the space
4545 * used by merging a reloc tree is twice the size of the
4546 * relocated tree nodes in the worst case: half for cowing
4547 * the reloc tree, half for cowing the fs tree. the space
4548 * used by cowing the reloc tree will be freed after the
4549 * tree is dropped. if we create a snapshot, cowing the fs
4550 * tree may use more space than it frees, so we need to
4551 * reserve extra space.
4552 */
4553 *bytes_to_reserve += rc->nodes_relocated;
4554}
4555
4556/*
4557 * called after snapshot is created. migrate block reservation
4558 * and create reloc root for the newly created snapshot
4559 *
4560 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4561 * references held on the reloc_root, one for root->reloc_root and one for
4562 * rc->reloc_roots.
4563 */
4564int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4565 struct btrfs_pending_snapshot *pending)
4566{
4567 struct btrfs_root *root = pending->root;
4568 struct btrfs_root *reloc_root;
4569 struct btrfs_root *new_root;
4570 struct reloc_control *rc = root->fs_info->reloc_ctl;
4571 int ret;
4572
4573 if (!rc || !have_reloc_root(root))
4574 return 0;
4575
4577 rc->merging_rsv_size += rc->nodes_relocated;
4578
4579 if (rc->merge_reloc_tree) {
4580 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4581 rc->block_rsv,
4582 rc->nodes_relocated, true);
4583 if (ret)
4584 return ret;
4585 }
4586
4587 new_root = pending->snap;
4588 reloc_root = create_reloc_root(trans, root->reloc_root,
4589 new_root->root_key.objectid);
4590 if (IS_ERR(reloc_root))
4591 return PTR_ERR(reloc_root);
4592
4593 ret = __add_reloc_root(reloc_root);
4594 ASSERT(ret != -EEXIST);
4595 if (ret) {
4596 /* Pairs with create_reloc_root */
4597 btrfs_put_root(reloc_root);
4598 return ret;
4599 }
4600 new_root->reloc_root = btrfs_grab_root(reloc_root);
4601
4602 if (rc->create_reloc_tree)
4603 ret = clone_backref_node(trans, rc, root, reloc_root);
4604 return ret;
4605}
4606
4607/*
4608 * Get the current bytenr for the block group which is being relocated.
4609 *
4610 * Return U64_MAX if no running relocation.
4611 */
4612u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
4613{
4614 u64 logical = U64_MAX;
4615
4616 lockdep_assert_held(&fs_info->reloc_mutex);
4617
4618 if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
4619 logical = fs_info->reloc_ctl->block_group->start;
4620 return logical;
4621}