1/*
2 * Copyright (C) 2009 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/blkdev.h>
23#include <linux/rbtree.h>
24#include <linux/slab.h>
25#include "ctree.h"
26#include "disk-io.h"
27#include "transaction.h"
28#include "volumes.h"
29#include "locking.h"
30#include "btrfs_inode.h"
31#include "async-thread.h"
32#include "free-space-cache.h"
33#include "inode-map.h"
34
35/*
36 * backref_node, mapping_node and tree_block start with this
37 */
38struct tree_entry {
39 struct rb_node rb_node;
40 u64 bytenr;
41};
42
/*
 * represents a tree block in the backref cache
 */
46struct backref_node {
47 struct rb_node rb_node;
48 u64 bytenr;
49
50 u64 new_bytenr;
	/* objectid of the tree block owner; may not be up to date */
52 u64 owner;
53 /* link to pending, changed or detached list */
54 struct list_head list;
	/* list of upper level blocks that reference this block */
56 struct list_head upper;
57 /* list of child blocks in the cache */
58 struct list_head lower;
59 /* NULL if this node is not tree root */
60 struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
62 struct extent_buffer *eb;
63 /* level of tree block */
64 unsigned int level:8;
65 /* is the block in non-reference counted tree */
66 unsigned int cowonly:1;
67 /* 1 if no child node in the cache */
68 unsigned int lowest:1;
69 /* is the extent buffer locked */
70 unsigned int locked:1;
71 /* has the block been processed */
72 unsigned int processed:1;
73 /* have backrefs of this block been checked */
74 unsigned int checked:1;
75 /*
76 * 1 if corresponding block has been cowed but some upper
77 * level block pointers may not point to the new location
78 */
79 unsigned int pending:1;
80 /*
81 * 1 if the backref node isn't connected to any other
82 * backref node.
83 */
84 unsigned int detached:1;
85};
86
/*
 * represents a block pointer (an edge) in the backref cache
 */
90struct backref_edge {
91 struct list_head list[2];
92 struct backref_node *node[2];
93};
94
95#define LOWER 0
96#define UPPER 1
97
98struct backref_cache {
99 /* red black tree of all backref nodes in the cache */
100 struct rb_root rb_root;
101 /* for passing backref nodes to btrfs_reloc_cow_block */
102 struct backref_node *path[BTRFS_MAX_LEVEL];
103 /*
104 * list of blocks that have been cowed but some block
105 * pointers in upper level blocks may not reflect the
106 * new location
107 */
108 struct list_head pending[BTRFS_MAX_LEVEL];
109 /* list of backref nodes with no child node */
110 struct list_head leaves;
111 /* list of blocks that have been cowed in current transaction */
112 struct list_head changed;
	/* list of detached backref nodes */
114 struct list_head detached;
115
116 u64 last_trans;
117
118 int nr_nodes;
119 int nr_edges;
120};
121
122/*
123 * map address of tree root to tree
124 */
125struct mapping_node {
126 struct rb_node rb_node;
127 u64 bytenr;
128 void *data;
129};
130
131struct mapping_tree {
132 struct rb_root rb_root;
133 spinlock_t lock;
134};
135
/*
 * represents a tree block to process
 */
139struct tree_block {
140 struct rb_node rb_node;
141 u64 bytenr;
142 struct btrfs_key key;
143 unsigned int level:8;
144 unsigned int key_ready:1;
145};
146
147#define MAX_EXTENTS 128
148
149struct file_extent_cluster {
150 u64 start;
151 u64 end;
152 u64 boundary[MAX_EXTENTS];
153 unsigned int nr;
154};
155
156struct reloc_control {
157 /* block group to relocate */
158 struct btrfs_block_group_cache *block_group;
159 /* extent tree */
160 struct btrfs_root *extent_root;
161 /* inode for moving data */
162 struct inode *data_inode;
163
164 struct btrfs_block_rsv *block_rsv;
165
166 struct backref_cache backref_cache;
167
168 struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
170 struct extent_io_tree processed_blocks;
171 /* map start of tree root to corresponding reloc tree */
172 struct mapping_tree reloc_root_tree;
173 /* list of reloc trees */
174 struct list_head reloc_roots;
175 /* size of metadata reservation for merging reloc trees */
176 u64 merging_rsv_size;
177 /* size of relocated tree nodes */
178 u64 nodes_relocated;
179
180 u64 search_start;
181 u64 extents_found;
182
183 unsigned int stage:8;
184 unsigned int create_reloc_tree:1;
185 unsigned int merge_reloc_tree:1;
186 unsigned int found_file_extent:1;
187 unsigned int commit_transaction:1;
188};
189
190/* stages of data relocation */
191#define MOVE_DATA_EXTENTS 0
192#define UPDATE_DATA_PTRS 1
193
194static void remove_backref_node(struct backref_cache *cache,
195 struct backref_node *node);
196static void __mark_block_processed(struct reloc_control *rc,
197 struct backref_node *node);
198
199static void mapping_tree_init(struct mapping_tree *tree)
200{
201 tree->rb_root = RB_ROOT;
202 spin_lock_init(&tree->lock);
203}
204
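/*
 * initialize an empty backref cache: reset the rb tree and the pending,
 * changed, detached and leaf lists
 */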
205static void backref_cache_init(struct backref_cache *cache)
206{
207 int i;
208 cache->rb_root = RB_ROOT;
209 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
210 INIT_LIST_HEAD(&cache->pending[i]);
211 INIT_LIST_HEAD(&cache->changed);
212 INIT_LIST_HEAD(&cache->detached);
213 INIT_LIST_HEAD(&cache->leaves);
214}
215
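/*
 * drop all detached and leaf nodes from the cache and verify that
 * nothing (pending/changed nodes, edges) is left behind
 */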
216static void backref_cache_cleanup(struct backref_cache *cache)
217{
218 struct backref_node *node;
219 int i;
220
221 while (!list_empty(&cache->detached)) {
222 node = list_entry(cache->detached.next,
223 struct backref_node, list);
224 remove_backref_node(cache, node);
225 }
226
227 while (!list_empty(&cache->leaves)) {
228 node = list_entry(cache->leaves.next,
229 struct backref_node, lower);
230 remove_backref_node(cache, node);
231 }
232
233 cache->last_trans = 0;
234
235 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
236 BUG_ON(!list_empty(&cache->pending[i]));
237 BUG_ON(!list_empty(&cache->changed));
238 BUG_ON(!list_empty(&cache->detached));
239 BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
240 BUG_ON(cache->nr_nodes);
241 BUG_ON(cache->nr_edges);
242}
243
244static struct backref_node *alloc_backref_node(struct backref_cache *cache)
245{
246 struct backref_node *node;
247
248 node = kzalloc(sizeof(*node), GFP_NOFS);
249 if (node) {
250 INIT_LIST_HEAD(&node->list);
251 INIT_LIST_HEAD(&node->upper);
252 INIT_LIST_HEAD(&node->lower);
253 RB_CLEAR_NODE(&node->rb_node);
254 cache->nr_nodes++;
255 }
256 return node;
257}
258
259static void free_backref_node(struct backref_cache *cache,
260 struct backref_node *node)
261{
262 if (node) {
263 cache->nr_nodes--;
264 kfree(node);
265 }
266}
267
268static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
269{
270 struct backref_edge *edge;
271
272 edge = kzalloc(sizeof(*edge), GFP_NOFS);
273 if (edge)
274 cache->nr_edges++;
275 return edge;
276}
277
278static void free_backref_edge(struct backref_cache *cache,
279 struct backref_edge *edge)
280{
281 if (edge) {
282 cache->nr_edges--;
283 kfree(edge);
284 }
285}
286
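/*
 * insert a tree_entry keyed by 'bytenr' into the rb tree. returns the
 * existing rb_node if 'bytenr' is already present, NULL on success.
 */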
287static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
288 struct rb_node *node)
289{
290 struct rb_node **p = &root->rb_node;
291 struct rb_node *parent = NULL;
292 struct tree_entry *entry;
293
294 while (*p) {
295 parent = *p;
296 entry = rb_entry(parent, struct tree_entry, rb_node);
297
298 if (bytenr < entry->bytenr)
299 p = &(*p)->rb_left;
300 else if (bytenr > entry->bytenr)
301 p = &(*p)->rb_right;
302 else
303 return parent;
304 }
305
306 rb_link_node(node, parent, p);
307 rb_insert_color(node, root);
308 return NULL;
309}
310
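/*
 * find the tree_entry with the given 'bytenr' in the rb tree, or NULL
 * if it is not cached
 */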
311static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
312{
313 struct rb_node *n = root->rb_node;
314 struct tree_entry *entry;
315
316 while (n) {
317 entry = rb_entry(n, struct tree_entry, rb_node);
318
319 if (bytenr < entry->bytenr)
320 n = n->rb_left;
321 else if (bytenr > entry->bytenr)
322 n = n->rb_right;
323 else
324 return n;
325 }
326 return NULL;
327}
328
329void backref_tree_panic(struct rb_node *rb_node, int errno,
330 u64 bytenr)
331{
332
333 struct btrfs_fs_info *fs_info = NULL;
334 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
335 rb_node);
336 if (bnode->root)
337 fs_info = bnode->root->fs_info;
338 btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
339 "found at offset %llu\n", (unsigned long long)bytenr);
340}
341
/*
 * walk up the backref nodes until reaching the node that represents the
 * tree root
 */
345static struct backref_node *walk_up_backref(struct backref_node *node,
346 struct backref_edge *edges[],
347 int *index)
348{
349 struct backref_edge *edge;
350 int idx = *index;
351
352 while (!list_empty(&node->upper)) {
353 edge = list_entry(node->upper.next,
354 struct backref_edge, list[LOWER]);
355 edges[idx++] = edge;
356 node = edge->node[UPPER];
357 }
358 BUG_ON(node->detached);
359 *index = idx;
360 return node;
361}
362
363/*
364 * walk down backref nodes to find start of next reference path
365 */
366static struct backref_node *walk_down_backref(struct backref_edge *edges[],
367 int *index)
368{
369 struct backref_edge *edge;
370 struct backref_node *lower;
371 int idx = *index;
372
373 while (idx > 0) {
374 edge = edges[idx - 1];
375 lower = edge->node[LOWER];
376 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
377 idx--;
378 continue;
379 }
380 edge = list_entry(edge->list[LOWER].next,
381 struct backref_edge, list[LOWER]);
382 edges[idx - 1] = edge;
383 *index = idx;
384 return edge->node[UPPER];
385 }
386 *index = 0;
387 return NULL;
388}
389
390static void unlock_node_buffer(struct backref_node *node)
391{
392 if (node->locked) {
393 btrfs_tree_unlock(node->eb);
394 node->locked = 0;
395 }
396}
397
398static void drop_node_buffer(struct backref_node *node)
399{
400 if (node->eb) {
401 unlock_node_buffer(node);
402 free_extent_buffer(node->eb);
403 node->eb = NULL;
404 }
405}
406
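/*
 * release a backref node that has no more upper level referencers:
 * drop its extent buffer and remove it from the cache
 */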
407static void drop_backref_node(struct backref_cache *tree,
408 struct backref_node *node)
409{
410 BUG_ON(!list_empty(&node->upper));
411
412 drop_node_buffer(node);
413 list_del(&node->list);
414 list_del(&node->lower);
415 if (!RB_EMPTY_NODE(&node->rb_node))
416 rb_erase(&node->rb_node, &tree->rb_root);
417 free_backref_node(tree, node);
418}
419
420/*
421 * remove a backref node from the backref cache
422 */
423static void remove_backref_node(struct backref_cache *cache,
424 struct backref_node *node)
425{
426 struct backref_node *upper;
427 struct backref_edge *edge;
428
429 if (!node)
430 return;
431
432 BUG_ON(!node->lowest && !node->detached);
433 while (!list_empty(&node->upper)) {
434 edge = list_entry(node->upper.next, struct backref_edge,
435 list[LOWER]);
436 upper = edge->node[UPPER];
437 list_del(&edge->list[LOWER]);
438 list_del(&edge->list[UPPER]);
439 free_backref_edge(cache, edge);
440
441 if (RB_EMPTY_NODE(&upper->rb_node)) {
442 BUG_ON(!list_empty(&node->upper));
443 drop_backref_node(cache, node);
444 node = upper;
445 node->lowest = 1;
446 continue;
447 }
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
452 if (list_empty(&upper->lower)) {
453 list_add_tail(&upper->lower, &cache->leaves);
454 upper->lowest = 1;
455 }
456 }
457
458 drop_backref_node(cache, node);
459}
460
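/*
 * re-key a backref node after its block has moved: remove the node from
 * the rb tree and re-insert it at the new 'bytenr'
 */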
461static void update_backref_node(struct backref_cache *cache,
462 struct backref_node *node, u64 bytenr)
463{
464 struct rb_node *rb_node;
465 rb_erase(&node->rb_node, &cache->rb_root);
466 node->bytenr = bytenr;
467 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
468 if (rb_node)
469 backref_tree_panic(rb_node, -EEXIST, bytenr);
470}
471
472/*
473 * update backref cache after a transaction commit
474 */
475static int update_backref_cache(struct btrfs_trans_handle *trans,
476 struct backref_cache *cache)
477{
478 struct backref_node *node;
479 int level = 0;
480
481 if (cache->last_trans == 0) {
482 cache->last_trans = trans->transid;
483 return 0;
484 }
485
486 if (cache->last_trans == trans->transid)
487 return 0;
488
	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. a transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
494 while (!list_empty(&cache->detached)) {
495 node = list_entry(cache->detached.next,
496 struct backref_node, list);
497 remove_backref_node(cache, node);
498 }
499
500 while (!list_empty(&cache->changed)) {
501 node = list_entry(cache->changed.next,
502 struct backref_node, list);
503 list_del_init(&node->list);
504 BUG_ON(node->pending);
505 update_backref_node(cache, node, node->new_bytenr);
506 }
507
	/*
	 * some nodes can be left in the pending list if there were
	 * errors while processing the pending nodes.
	 */
512 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
513 list_for_each_entry(node, &cache->pending[level], list) {
514 BUG_ON(!node->pending);
515 if (node->bytenr == node->new_bytenr)
516 continue;
517 update_backref_node(cache, node, node->new_bytenr);
518 }
519 }
520
521 cache->last_trans = 0;
522 return 1;
523}
524
525
526static int should_ignore_root(struct btrfs_root *root)
527{
528 struct btrfs_root *reloc_root;
529
530 if (!root->ref_cows)
531 return 0;
532
533 reloc_root = root->reloc_root;
534 if (!reloc_root)
535 return 0;
536
537 if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
538 root->fs_info->running_transaction->transid - 1)
539 return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
546 return 1;
547}
548/*
549 * find reloc tree by address of tree root
550 */
551static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
552 u64 bytenr)
553{
554 struct rb_node *rb_node;
555 struct mapping_node *node;
556 struct btrfs_root *root = NULL;
557
558 spin_lock(&rc->reloc_root_tree.lock);
559 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
560 if (rb_node) {
561 node = rb_entry(rb_node, struct mapping_node, rb_node);
562 root = (struct btrfs_root *)node->data;
563 }
564 spin_unlock(&rc->reloc_root_tree.lock);
565 return root;
566}
567
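/*
 * return 1 if the objectid belongs to one of the trees that are not
 * reference counted (root, extent, chunk, dev, log and csum trees)
 */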
568static int is_cowonly_root(u64 root_objectid)
569{
570 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
571 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
572 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
573 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
574 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
575 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
576 return 1;
577 return 0;
578}
579
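/*
 * look up a root by objectid. COW-only trees are keyed with offset 0,
 * reference counted trees with the highest offset ((u64)-1).
 */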
580static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
581 u64 root_objectid)
582{
583 struct btrfs_key key;
584
585 key.objectid = root_objectid;
586 key.type = BTRFS_ROOT_ITEM_KEY;
587 if (is_cowonly_root(root_objectid))
588 key.offset = 0;
589 else
590 key.offset = (u64)-1;
591
592 return btrfs_read_fs_root_no_name(fs_info, &key);
593}
594
595#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
596static noinline_for_stack
597struct btrfs_root *find_tree_root(struct reloc_control *rc,
598 struct extent_buffer *leaf,
599 struct btrfs_extent_ref_v0 *ref0)
600{
601 struct btrfs_root *root;
602 u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
603 u64 generation = btrfs_ref_generation_v0(leaf, ref0);
604
605 BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
606
607 root = read_fs_root(rc->extent_root->fs_info, root_objectid);
608 BUG_ON(IS_ERR(root));
609
610 if (root->ref_cows &&
611 generation != btrfs_root_generation(&root->root_item))
612 return NULL;
613
614 return root;
615}
616#endif
617
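/*
 * locate the inline backrefs of a tree block extent item. on success,
 * *ptr and *end delimit the inline refs and 0 is returned; 1 is
 * returned if the item carries no inline refs.
 */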
618static noinline_for_stack
619int find_inline_backref(struct extent_buffer *leaf, int slot,
620 unsigned long *ptr, unsigned long *end)
621{
622 struct btrfs_extent_item *ei;
623 struct btrfs_tree_block_info *bi;
624 u32 item_size;
625
626 item_size = btrfs_item_size_nr(leaf, slot);
627#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
628 if (item_size < sizeof(*ei)) {
629 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
630 return 1;
631 }
632#endif
633 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
634 WARN_ON(!(btrfs_extent_flags(leaf, ei) &
635 BTRFS_EXTENT_FLAG_TREE_BLOCK));
636
637 if (item_size <= sizeof(*ei) + sizeof(*bi)) {
638 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
639 return 1;
640 }
641
642 bi = (struct btrfs_tree_block_info *)(ei + 1);
643 *ptr = (unsigned long)(bi + 1);
644 *end = (unsigned long)ei + item_size;
645 return 0;
646}
647
/*
 * build the backref tree for a given tree block. the root of the backref
 * tree corresponds to the tree block, and the leaves of the backref tree
 * correspond to the roots of the b-trees that reference the tree block.
 *
 * the basic idea of this function is to check the backrefs of a given
 * block to find the upper level blocks that reference the block, and then
 * check the backrefs of these upper level blocks recursively. the
 * recursion stops when a tree root is reached or the backrefs for the
 * block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know that
 * backrefs for all upper level blocks that directly/indirectly reference
 * the block are also cached.
 */
662static noinline_for_stack
663struct backref_node *build_backref_tree(struct reloc_control *rc,
664 struct btrfs_key *node_key,
665 int level, u64 bytenr)
666{
667 struct backref_cache *cache = &rc->backref_cache;
668 struct btrfs_path *path1;
669 struct btrfs_path *path2;
670 struct extent_buffer *eb;
671 struct btrfs_root *root;
672 struct backref_node *cur;
673 struct backref_node *upper;
674 struct backref_node *lower;
675 struct backref_node *node = NULL;
676 struct backref_node *exist = NULL;
677 struct backref_edge *edge;
678 struct rb_node *rb_node;
679 struct btrfs_key key;
680 unsigned long end;
681 unsigned long ptr;
682 LIST_HEAD(list);
683 LIST_HEAD(useless);
684 int cowonly;
685 int ret;
686 int err = 0;
687
688 path1 = btrfs_alloc_path();
689 path2 = btrfs_alloc_path();
690 if (!path1 || !path2) {
691 err = -ENOMEM;
692 goto out;
693 }
694 path1->reada = 1;
695 path2->reada = 2;
696
697 node = alloc_backref_node(cache);
698 if (!node) {
699 err = -ENOMEM;
700 goto out;
701 }
702
703 node->bytenr = bytenr;
704 node->level = level;
705 node->lowest = 1;
706 cur = node;
707again:
708 end = 0;
709 ptr = 0;
710 key.objectid = cur->bytenr;
711 key.type = BTRFS_EXTENT_ITEM_KEY;
712 key.offset = (u64)-1;
713
714 path1->search_commit_root = 1;
715 path1->skip_locking = 1;
716 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
717 0, 0);
718 if (ret < 0) {
719 err = ret;
720 goto out;
721 }
722 BUG_ON(!ret || !path1->slots[0]);
723
724 path1->slots[0]--;
725
726 WARN_ON(cur->checked);
727 if (!list_empty(&cur->upper)) {
728 /*
729 * the backref was added previously when processing
730 * backref of type BTRFS_TREE_BLOCK_REF_KEY
731 */
732 BUG_ON(!list_is_singular(&cur->upper));
733 edge = list_entry(cur->upper.next, struct backref_edge,
734 list[LOWER]);
735 BUG_ON(!list_empty(&edge->list[UPPER]));
736 exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
741 if (!exist->checked)
742 list_add_tail(&edge->list[UPPER], &list);
743 } else {
744 exist = NULL;
745 }
746
747 while (1) {
748 cond_resched();
749 eb = path1->nodes[0];
750
751 if (ptr >= end) {
752 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
753 ret = btrfs_next_leaf(rc->extent_root, path1);
754 if (ret < 0) {
755 err = ret;
756 goto out;
757 }
758 if (ret > 0)
759 break;
760 eb = path1->nodes[0];
761 }
762
763 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
764 if (key.objectid != cur->bytenr) {
765 WARN_ON(exist);
766 break;
767 }
768
769 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
770 ret = find_inline_backref(eb, path1->slots[0],
771 &ptr, &end);
772 if (ret)
773 goto next;
774 }
775 }
776
777 if (ptr < end) {
778 /* update key for inline back ref */
779 struct btrfs_extent_inline_ref *iref;
780 iref = (struct btrfs_extent_inline_ref *)ptr;
781 key.type = btrfs_extent_inline_ref_type(eb, iref);
782 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
783 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
784 key.type != BTRFS_SHARED_BLOCK_REF_KEY);
785 }
786
787 if (exist &&
788 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
789 exist->owner == key.offset) ||
790 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
791 exist->bytenr == key.offset))) {
792 exist = NULL;
793 goto next;
794 }
795
796#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
797 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
798 key.type == BTRFS_EXTENT_REF_V0_KEY) {
799 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
800 struct btrfs_extent_ref_v0 *ref0;
801 ref0 = btrfs_item_ptr(eb, path1->slots[0],
802 struct btrfs_extent_ref_v0);
803 if (key.objectid == key.offset) {
804 root = find_tree_root(rc, eb, ref0);
805 if (root && !should_ignore_root(root))
806 cur->root = root;
807 else
808 list_add(&cur->list, &useless);
809 break;
810 }
811 if (is_cowonly_root(btrfs_ref_root_v0(eb,
812 ref0)))
813 cur->cowonly = 1;
814 }
815#else
816 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
817 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
818#endif
819 if (key.objectid == key.offset) {
820 /*
821 * only root blocks of reloc trees use
822 * backref of this type.
823 */
824 root = find_reloc_root(rc, cur->bytenr);
825 BUG_ON(!root);
826 cur->root = root;
827 break;
828 }
829
830 edge = alloc_backref_edge(cache);
831 if (!edge) {
832 err = -ENOMEM;
833 goto out;
834 }
835 rb_node = tree_search(&cache->rb_root, key.offset);
836 if (!rb_node) {
837 upper = alloc_backref_node(cache);
838 if (!upper) {
839 free_backref_edge(cache, edge);
840 err = -ENOMEM;
841 goto out;
842 }
843 upper->bytenr = key.offset;
844 upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
849 list_add_tail(&edge->list[UPPER], &list);
850 } else {
851 upper = rb_entry(rb_node, struct backref_node,
852 rb_node);
853 BUG_ON(!upper->checked);
854 INIT_LIST_HEAD(&edge->list[UPPER]);
855 }
856 list_add_tail(&edge->list[LOWER], &cur->upper);
857 edge->node[LOWER] = cur;
858 edge->node[UPPER] = upper;
859
860 goto next;
861 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
862 goto next;
863 }
864
865 /* key.type == BTRFS_TREE_BLOCK_REF_KEY */
866 root = read_fs_root(rc->extent_root->fs_info, key.offset);
867 if (IS_ERR(root)) {
868 err = PTR_ERR(root);
869 goto out;
870 }
871
872 if (!root->ref_cows)
873 cur->cowonly = 1;
874
875 if (btrfs_root_level(&root->root_item) == cur->level) {
876 /* tree root */
877 BUG_ON(btrfs_root_bytenr(&root->root_item) !=
878 cur->bytenr);
879 if (should_ignore_root(root))
880 list_add(&cur->list, &useless);
881 else
882 cur->root = root;
883 break;
884 }
885
886 level = cur->level + 1;
887
		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
892 path2->search_commit_root = 1;
893 path2->skip_locking = 1;
894 path2->lowest_level = level;
895 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
896 path2->lowest_level = 0;
897 if (ret < 0) {
898 err = ret;
899 goto out;
900 }
901 if (ret > 0 && path2->slots[level] > 0)
902 path2->slots[level]--;
903
904 eb = path2->nodes[level];
905 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
906 cur->bytenr);
907
908 lower = cur;
909 for (; level < BTRFS_MAX_LEVEL; level++) {
910 if (!path2->nodes[level]) {
911 BUG_ON(btrfs_root_bytenr(&root->root_item) !=
912 lower->bytenr);
913 if (should_ignore_root(root))
914 list_add(&lower->list, &useless);
915 else
916 lower->root = root;
917 break;
918 }
919
920 edge = alloc_backref_edge(cache);
921 if (!edge) {
922 err = -ENOMEM;
923 goto out;
924 }
925
926 eb = path2->nodes[level];
927 rb_node = tree_search(&cache->rb_root, eb->start);
928 if (!rb_node) {
929 upper = alloc_backref_node(cache);
930 if (!upper) {
931 free_backref_edge(cache, edge);
932 err = -ENOMEM;
933 goto out;
934 }
935 upper->bytenr = eb->start;
936 upper->owner = btrfs_header_owner(eb);
937 upper->level = lower->level + 1;
938 if (!root->ref_cows)
939 upper->cowonly = 1;
940
				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
945 if (btrfs_block_can_be_shared(root, eb))
946 upper->checked = 0;
947 else
948 upper->checked = 1;
949
				/*
				 * add the block to the pending list if we
				 * need to check its backrefs. only blocks
				 * at 'cur->level + 1' are added to the
				 * tail of the pending list. this guarantees
				 * that we check backrefs from lower level
				 * blocks to upper level blocks.
				 */
958 if (!upper->checked &&
959 level == cur->level + 1) {
960 list_add_tail(&edge->list[UPPER],
961 &list);
962 } else
963 INIT_LIST_HEAD(&edge->list[UPPER]);
964 } else {
965 upper = rb_entry(rb_node, struct backref_node,
966 rb_node);
967 BUG_ON(!upper->checked);
968 INIT_LIST_HEAD(&edge->list[UPPER]);
969 if (!upper->owner)
970 upper->owner = btrfs_header_owner(eb);
971 }
972 list_add_tail(&edge->list[LOWER], &lower->upper);
973 edge->node[LOWER] = lower;
974 edge->node[UPPER] = upper;
975
976 if (rb_node)
977 break;
978 lower = upper;
979 upper = NULL;
980 }
981 btrfs_release_path(path2);
982next:
983 if (ptr < end) {
984 ptr += btrfs_extent_inline_ref_size(key.type);
985 if (ptr >= end) {
986 WARN_ON(ptr > end);
987 ptr = 0;
988 end = 0;
989 }
990 }
991 if (ptr >= end)
992 path1->slots[0]++;
993 }
994 btrfs_release_path(path1);
995
996 cur->checked = 1;
997 WARN_ON(exist);
998
999 /* the pending list isn't empty, take the first block to process */
1000 if (!list_empty(&list)) {
1001 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1002 list_del_init(&edge->list[UPPER]);
1003 cur = edge->node[UPPER];
1004 goto again;
1005 }
1006
	/*
	 * everything went well; connect the backref nodes and insert them
	 * into the cache.
	 */
1011 BUG_ON(!node->checked);
1012 cowonly = node->cowonly;
1013 if (!cowonly) {
1014 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1015 &node->rb_node);
1016 if (rb_node)
1017 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1018 list_add_tail(&node->lower, &cache->leaves);
1019 }
1020
1021 list_for_each_entry(edge, &node->upper, list[LOWER])
1022 list_add_tail(&edge->list[UPPER], &list);
1023
1024 while (!list_empty(&list)) {
1025 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1026 list_del_init(&edge->list[UPPER]);
1027 upper = edge->node[UPPER];
1028 if (upper->detached) {
1029 list_del(&edge->list[LOWER]);
1030 lower = edge->node[LOWER];
1031 free_backref_edge(cache, edge);
1032 if (list_empty(&lower->upper))
1033 list_add(&lower->list, &useless);
1034 continue;
1035 }
1036
1037 if (!RB_EMPTY_NODE(&upper->rb_node)) {
1038 if (upper->lowest) {
1039 list_del_init(&upper->lower);
1040 upper->lowest = 0;
1041 }
1042
1043 list_add_tail(&edge->list[UPPER], &upper->lower);
1044 continue;
1045 }
1046
1047 BUG_ON(!upper->checked);
1048 BUG_ON(cowonly != upper->cowonly);
1049 if (!cowonly) {
1050 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1051 &upper->rb_node);
1052 if (rb_node)
1053 backref_tree_panic(rb_node, -EEXIST,
1054 upper->bytenr);
1055 }
1056
1057 list_add_tail(&edge->list[UPPER], &upper->lower);
1058
1059 list_for_each_entry(edge, &upper->upper, list[LOWER])
1060 list_add_tail(&edge->list[UPPER], &list);
1061 }
1062 /*
1063 * process useless backref nodes. backref nodes for tree leaves
1064 * are deleted from the cache. backref nodes for upper level
1065 * tree blocks are left in the cache to avoid unnecessary backref
1066 * lookup.
1067 */
1068 while (!list_empty(&useless)) {
1069 upper = list_entry(useless.next, struct backref_node, list);
1070 list_del_init(&upper->list);
1071 BUG_ON(!list_empty(&upper->upper));
1072 if (upper == node)
1073 node = NULL;
1074 if (upper->lowest) {
1075 list_del_init(&upper->lower);
1076 upper->lowest = 0;
1077 }
1078 while (!list_empty(&upper->lower)) {
1079 edge = list_entry(upper->lower.next,
1080 struct backref_edge, list[UPPER]);
1081 list_del(&edge->list[UPPER]);
1082 list_del(&edge->list[LOWER]);
1083 lower = edge->node[LOWER];
1084 free_backref_edge(cache, edge);
1085
1086 if (list_empty(&lower->upper))
1087 list_add(&lower->list, &useless);
1088 }
1089 __mark_block_processed(rc, upper);
1090 if (upper->level > 0) {
1091 list_add(&upper->list, &cache->detached);
1092 upper->detached = 1;
1093 } else {
1094 rb_erase(&upper->rb_node, &cache->rb_root);
1095 free_backref_node(cache, upper);
1096 }
1097 }
1098out:
1099 btrfs_free_path(path1);
1100 btrfs_free_path(path2);
1101 if (err) {
1102 while (!list_empty(&useless)) {
1103 lower = list_entry(useless.next,
1104 struct backref_node, upper);
1105 list_del_init(&lower->upper);
1106 }
1107 upper = node;
1108 INIT_LIST_HEAD(&list);
1109 while (upper) {
1110 if (RB_EMPTY_NODE(&upper->rb_node)) {
1111 list_splice_tail(&upper->upper, &list);
1112 free_backref_node(cache, upper);
1113 }
1114
1115 if (list_empty(&list))
1116 break;
1117
1118 edge = list_entry(list.next, struct backref_edge,
1119 list[LOWER]);
1120 list_del(&edge->list[LOWER]);
1121 upper = edge->node[UPPER];
1122 free_backref_edge(cache, edge);
1123 }
1124 return ERR_PTR(err);
1125 }
1126 BUG_ON(node && node->detached);
1127 return node;
1128}
1129
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
1135static int clone_backref_node(struct btrfs_trans_handle *trans,
1136 struct reloc_control *rc,
1137 struct btrfs_root *src,
1138 struct btrfs_root *dest)
1139{
1140 struct btrfs_root *reloc_root = src->reloc_root;
1141 struct backref_cache *cache = &rc->backref_cache;
1142 struct backref_node *node = NULL;
1143 struct backref_node *new_node;
1144 struct backref_edge *edge;
1145 struct backref_edge *new_edge;
1146 struct rb_node *rb_node;
1147
1148 if (cache->last_trans > 0)
1149 update_backref_cache(trans, cache);
1150
1151 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1152 if (rb_node) {
1153 node = rb_entry(rb_node, struct backref_node, rb_node);
1154 if (node->detached)
1155 node = NULL;
1156 else
1157 BUG_ON(node->new_bytenr != reloc_root->node->start);
1158 }
1159
1160 if (!node) {
1161 rb_node = tree_search(&cache->rb_root,
1162 reloc_root->commit_root->start);
1163 if (rb_node) {
1164 node = rb_entry(rb_node, struct backref_node,
1165 rb_node);
1166 BUG_ON(node->detached);
1167 }
1168 }
1169
1170 if (!node)
1171 return 0;
1172
1173 new_node = alloc_backref_node(cache);
1174 if (!new_node)
1175 return -ENOMEM;
1176
1177 new_node->bytenr = dest->node->start;
1178 new_node->level = node->level;
1179 new_node->lowest = node->lowest;
1180 new_node->checked = 1;
1181 new_node->root = dest;
1182
1183 if (!node->lowest) {
1184 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1185 new_edge = alloc_backref_edge(cache);
1186 if (!new_edge)
1187 goto fail;
1188
1189 new_edge->node[UPPER] = new_node;
1190 new_edge->node[LOWER] = edge->node[LOWER];
1191 list_add_tail(&new_edge->list[UPPER],
1192 &new_node->lower);
1193 }
1194 } else {
1195 list_add_tail(&new_node->lower, &cache->leaves);
1196 }
1197
1198 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1199 &new_node->rb_node);
1200 if (rb_node)
1201 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1202
1203 if (!new_node->lowest) {
1204 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1205 list_add_tail(&new_edge->list[LOWER],
1206 &new_edge->node[LOWER]->upper);
1207 }
1208 }
1209 return 0;
1210fail:
1211 while (!list_empty(&new_node->lower)) {
1212 new_edge = list_entry(new_node->lower.next,
1213 struct backref_edge, list[UPPER]);
1214 list_del(&new_edge->list[UPPER]);
1215 free_backref_edge(cache, new_edge);
1216 }
1217 free_backref_node(cache, new_node);
1218 return -ENOMEM;
1219}
1220
1221/*
1222 * helper to add 'address of tree root -> reloc tree' mapping
1223 */
1224static int __must_check __add_reloc_root(struct btrfs_root *root)
1225{
1226 struct rb_node *rb_node;
1227 struct mapping_node *node;
1228 struct reloc_control *rc = root->fs_info->reloc_ctl;
1229
1230 node = kmalloc(sizeof(*node), GFP_NOFS);
1231 if (!node)
1232 return -ENOMEM;
1233
1234 node->bytenr = root->node->start;
1235 node->data = root;
1236
1237 spin_lock(&rc->reloc_root_tree.lock);
1238 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1239 node->bytenr, &node->rb_node);
1240 spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree\n", (unsigned long long)node->bytenr);
		kfree(node);
	}
1247
1248 list_add_tail(&root->root_list, &rc->reloc_roots);
1249 return 0;
1250}
1251
1252/*
1253 * helper to update/delete the 'address of tree root -> reloc tree'
1254 * mapping
1255 */
1256static int __update_reloc_root(struct btrfs_root *root, int del)
1257{
1258 struct rb_node *rb_node;
1259 struct mapping_node *node = NULL;
1260 struct reloc_control *rc = root->fs_info->reloc_ctl;
1261
1262 spin_lock(&rc->reloc_root_tree.lock);
1263 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1264 root->commit_root->start);
1265 if (rb_node) {
1266 node = rb_entry(rb_node, struct mapping_node, rb_node);
1267 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1268 }
1269 spin_unlock(&rc->reloc_root_tree.lock);
1270
1271 BUG_ON((struct btrfs_root *)node->data != root);
1272
1273 if (!del) {
1274 spin_lock(&rc->reloc_root_tree.lock);
1275 node->bytenr = root->node->start;
1276 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1277 node->bytenr, &node->rb_node);
1278 spin_unlock(&rc->reloc_root_tree.lock);
1279 if (rb_node)
1280 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1281 } else {
1282 spin_lock(&root->fs_info->trans_lock);
1283 list_del_init(&root->root_list);
1284 spin_unlock(&root->fs_info->trans_lock);
1285 kfree(node);
1286 }
1287 return 0;
1288}
1289
1290static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1291 struct btrfs_root *root, u64 objectid)
1292{
1293 struct btrfs_root *reloc_root;
1294 struct extent_buffer *eb;
1295 struct btrfs_root_item *root_item;
1296 struct btrfs_key root_key;
1297 int ret;
1298
1299 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1300 BUG_ON(!root_item);
1301
1302 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1303 root_key.type = BTRFS_ROOT_ITEM_KEY;
1304 root_key.offset = objectid;
1305
1306 if (root->root_key.objectid == objectid) {
1307 /* called by btrfs_init_reloc_root */
1308 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1309 BTRFS_TREE_RELOC_OBJECTID);
1310 BUG_ON(ret);
1311
1312 btrfs_set_root_last_snapshot(&root->root_item,
1313 trans->transid - 1);
1314 } else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, so all tree blocks
		 * modified after it was created have the RELOC flag
		 * set in their headers. it is therefore OK not to
		 * update 'last_snapshot'.
		 */
1322 ret = btrfs_copy_root(trans, root, root->node, &eb,
1323 BTRFS_TREE_RELOC_OBJECTID);
1324 BUG_ON(ret);
1325 }
1326
1327 memcpy(root_item, &root->root_item, sizeof(*root_item));
1328 btrfs_set_root_bytenr(root_item, eb->start);
1329 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1330 btrfs_set_root_generation(root_item, trans->transid);
1331
1332 if (root->root_key.objectid == objectid) {
1333 btrfs_set_root_refs(root_item, 0);
1334 memset(&root_item->drop_progress, 0,
1335 sizeof(struct btrfs_disk_key));
1336 root_item->drop_level = 0;
1337 }
1338
1339 btrfs_tree_unlock(eb);
1340 free_extent_buffer(eb);
1341
1342 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
1343 &root_key, root_item);
1344 BUG_ON(ret);
1345 kfree(root_item);
1346
1347 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
1348 &root_key);
1349 BUG_ON(IS_ERR(reloc_root));
1350 reloc_root->last_trans = trans->transid;
1351 return reloc_root;
1352}
1353
/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
1358int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1359 struct btrfs_root *root)
1360{
1361 struct btrfs_root *reloc_root;
1362 struct reloc_control *rc = root->fs_info->reloc_ctl;
1363 int clear_rsv = 0;
1364 int ret;
1365
1366 if (root->reloc_root) {
1367 reloc_root = root->reloc_root;
1368 reloc_root->last_trans = trans->transid;
1369 return 0;
1370 }
1371
1372 if (!rc || !rc->create_reloc_tree ||
1373 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1374 return 0;
1375
1376 if (!trans->block_rsv) {
1377 trans->block_rsv = rc->block_rsv;
1378 clear_rsv = 1;
1379 }
1380 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1381 if (clear_rsv)
1382 trans->block_rsv = NULL;
1383
1384 ret = __add_reloc_root(reloc_root);
1385 BUG_ON(ret < 0);
1386 root->reloc_root = reloc_root;
1387 return 0;
1388}
1389
1390/*
1391 * update root item of reloc tree
1392 */
1393int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1394 struct btrfs_root *root)
1395{
1396 struct btrfs_root *reloc_root;
1397 struct btrfs_root_item *root_item;
1398 int del = 0;
1399 int ret;
1400
1401 if (!root->reloc_root)
1402 goto out;
1403
1404 reloc_root = root->reloc_root;
1405 root_item = &reloc_root->root_item;
1406
1407 if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1408 btrfs_root_refs(root_item) == 0) {
1409 root->reloc_root = NULL;
1410 del = 1;
1411 }
1412
1413 __update_reloc_root(reloc_root, del);
1414
1415 if (reloc_root->commit_root != reloc_root->node) {
1416 btrfs_set_root_node(root_item, reloc_root->node);
1417 free_extent_buffer(reloc_root->commit_root);
1418 reloc_root->commit_root = btrfs_root_node(reloc_root);
1419 }
1420
1421 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1422 &reloc_root->root_key, root_item);
1423 BUG_ON(ret);
1424
1425out:
1426 return 0;
1427}
1428
1429/*
1430 * helper to find first cached inode with inode number >= objectid
1431 * in a subvolume
1432 */
1433static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1434{
1435 struct rb_node *node;
1436 struct rb_node *prev;
1437 struct btrfs_inode *entry;
1438 struct inode *inode;
1439
1440 spin_lock(&root->inode_lock);
1441again:
1442 node = root->inode_tree.rb_node;
1443 prev = NULL;
1444 while (node) {
1445 prev = node;
1446 entry = rb_entry(node, struct btrfs_inode, rb_node);
1447
1448 if (objectid < btrfs_ino(&entry->vfs_inode))
1449 node = node->rb_left;
1450 else if (objectid > btrfs_ino(&entry->vfs_inode))
1451 node = node->rb_right;
1452 else
1453 break;
1454 }
1455 if (!node) {
1456 while (prev) {
1457 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1458 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
1459 node = prev;
1460 break;
1461 }
1462 prev = rb_next(prev);
1463 }
1464 }
1465 while (node) {
1466 entry = rb_entry(node, struct btrfs_inode, rb_node);
1467 inode = igrab(&entry->vfs_inode);
1468 if (inode) {
1469 spin_unlock(&root->inode_lock);
1470 return inode;
1471 }
1472
1473 objectid = btrfs_ino(&entry->vfs_inode) + 1;
1474 if (cond_resched_lock(&root->inode_lock))
1475 goto again;
1476
1477 node = rb_next(node);
1478 }
1479 spin_unlock(&root->inode_lock);
1480 return NULL;
1481}
1482
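/*
 * check whether 'bytenr' falls inside the given block group
 */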
1483static int in_block_group(u64 bytenr,
1484 struct btrfs_block_group_cache *block_group)
1485{
1486 if (bytenr >= block_group->key.objectid &&
1487 bytenr < block_group->key.objectid + block_group->key.offset)
1488 return 1;
1489 return 0;
1490}
1491
1492/*
1493 * get new location of data
1494 */
1495static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1496 u64 bytenr, u64 num_bytes)
1497{
1498 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1499 struct btrfs_path *path;
1500 struct btrfs_file_extent_item *fi;
1501 struct extent_buffer *leaf;
1502 int ret;
1503
1504 path = btrfs_alloc_path();
1505 if (!path)
1506 return -ENOMEM;
1507
1508 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1509 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
1510 bytenr, 0);
1511 if (ret < 0)
1512 goto out;
1513 if (ret > 0) {
1514 ret = -ENOENT;
1515 goto out;
1516 }
1517
1518 leaf = path->nodes[0];
1519 fi = btrfs_item_ptr(leaf, path->slots[0],
1520 struct btrfs_file_extent_item);
1521
1522 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1523 btrfs_file_extent_compression(leaf, fi) ||
1524 btrfs_file_extent_encryption(leaf, fi) ||
1525 btrfs_file_extent_other_encoding(leaf, fi));
1526
1527 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1528 ret = 1;
1529 goto out;
1530 }
1531
1532 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1533 ret = 0;
1534out:
1535 btrfs_free_path(path);
1536 return ret;
1537}
1538
1539/*
1540 * update file extent items in the tree leaf to point to
1541 * the new locations.
1542 */
1543static noinline_for_stack
1544int replace_file_extents(struct btrfs_trans_handle *trans,
1545 struct reloc_control *rc,
1546 struct btrfs_root *root,
1547 struct extent_buffer *leaf)
1548{
1549 struct btrfs_key key;
1550 struct btrfs_file_extent_item *fi;
1551 struct inode *inode = NULL;
1552 u64 parent;
1553 u64 bytenr;
1554 u64 new_bytenr = 0;
1555 u64 num_bytes;
1556 u64 end;
1557 u32 nritems;
1558 u32 i;
1559 int ret;
1560 int first = 1;
1561 int dirty = 0;
1562
1563 if (rc->stage != UPDATE_DATA_PTRS)
1564 return 0;
1565
1566 /* reloc trees always use full backref */
1567 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1568 parent = leaf->start;
1569 else
1570 parent = 0;
1571
1572 nritems = btrfs_header_nritems(leaf);
1573 for (i = 0; i < nritems; i++) {
1574 cond_resched();
1575 btrfs_item_key_to_cpu(leaf, &key, i);
1576 if (key.type != BTRFS_EXTENT_DATA_KEY)
1577 continue;
1578 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1579 if (btrfs_file_extent_type(leaf, fi) ==
1580 BTRFS_FILE_EXTENT_INLINE)
1581 continue;
1582 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1583 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1584 if (bytenr == 0)
1585 continue;
1586 if (!in_block_group(bytenr, rc->block_group))
1587 continue;
1588
		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
1593 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1594 if (first) {
1595 inode = find_next_inode(root, key.objectid);
1596 first = 0;
1597 } else if (inode && btrfs_ino(inode) < key.objectid) {
1598 btrfs_add_delayed_iput(inode);
1599 inode = find_next_inode(root, key.objectid);
1600 }
1601 if (inode && btrfs_ino(inode) == key.objectid) {
1602 end = key.offset +
1603 btrfs_file_extent_num_bytes(leaf, fi);
1604 WARN_ON(!IS_ALIGNED(key.offset,
1605 root->sectorsize));
1606 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1607 end--;
1608 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1609 key.offset, end);
1610 if (!ret)
1611 continue;
1612
1613 btrfs_drop_extent_cache(inode, key.offset, end,
1614 1);
1615 unlock_extent(&BTRFS_I(inode)->io_tree,
1616 key.offset, end);
1617 }
1618 }
1619
1620 ret = get_new_location(rc->data_inode, &new_bytenr,
1621 bytenr, num_bytes);
1622 if (ret > 0) {
1623 WARN_ON(1);
1624 continue;
1625 }
1626 BUG_ON(ret < 0);
1627
1628 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1629 dirty = 1;
1630
1631 key.offset -= btrfs_file_extent_offset(leaf, fi);
1632 ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
1633 num_bytes, parent,
1634 btrfs_header_owner(leaf),
1635 key.objectid, key.offset, 1);
1636 BUG_ON(ret);
1637
1638 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1639 parent, btrfs_header_owner(leaf),
1640 key.objectid, key.offset, 1);
1641 BUG_ON(ret);
1642 }
1643 if (dirty)
1644 btrfs_mark_buffer_dirty(leaf);
1645 if (inode)
1646 btrfs_add_delayed_iput(inode);
1647 return 0;
1648}
1649
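/*
 * compare the key at 'slot' in 'eb' with the key at the current slot of
 * 'path' at 'level'; returns 0 if the two disk keys are identical
 */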
1650static noinline_for_stack
1651int memcmp_node_keys(struct extent_buffer *eb, int slot,
1652 struct btrfs_path *path, int level)
1653{
1654 struct btrfs_disk_key key1;
1655 struct btrfs_disk_key key2;
1656 btrfs_node_key(eb, &key1, slot);
1657 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1658 return memcmp(&key1, &key2, sizeof(key1));
1659}
1660
/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, the level of the block + 1 is returned.
 * if no block was replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
1670static noinline_for_stack
1671int replace_path(struct btrfs_trans_handle *trans,
1672 struct btrfs_root *dest, struct btrfs_root *src,
1673 struct btrfs_path *path, struct btrfs_key *next_key,
1674 int lowest_level, int max_level)
1675{
1676 struct extent_buffer *eb;
1677 struct extent_buffer *parent;
1678 struct btrfs_key key;
1679 u64 old_bytenr;
1680 u64 new_bytenr;
1681 u64 old_ptr_gen;
1682 u64 new_ptr_gen;
1683 u64 last_snapshot;
1684 u32 blocksize;
1685 int cow = 0;
1686 int level;
1687 int ret;
1688 int slot;
1689
1690 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1691 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1692
1693 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1694again:
1695 slot = path->slots[lowest_level];
1696 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1697
1698 eb = btrfs_lock_root_node(dest);
1699 btrfs_set_lock_blocking(eb);
1700 level = btrfs_header_level(eb);
1701
1702 if (level < lowest_level) {
1703 btrfs_tree_unlock(eb);
1704 free_extent_buffer(eb);
1705 return 0;
1706 }
1707
1708 if (cow) {
1709 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1710 BUG_ON(ret);
1711 }
1712 btrfs_set_lock_blocking(eb);
1713
1714 if (next_key) {
1715 next_key->objectid = (u64)-1;
1716 next_key->type = (u8)-1;
1717 next_key->offset = (u64)-1;
1718 }
1719
1720 parent = eb;
1721 while (1) {
1722 level = btrfs_header_level(parent);
1723 BUG_ON(level < lowest_level);
1724
1725 ret = btrfs_bin_search(parent, &key, level, &slot);
1726 if (ret && slot > 0)
1727 slot--;
1728
1729 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1730 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1731
1732 old_bytenr = btrfs_node_blockptr(parent, slot);
1733 blocksize = btrfs_level_size(dest, level - 1);
1734 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1735
1736 if (level <= max_level) {
1737 eb = path->nodes[level];
1738 new_bytenr = btrfs_node_blockptr(eb,
1739 path->slots[level]);
1740 new_ptr_gen = btrfs_node_ptr_generation(eb,
1741 path->slots[level]);
1742 } else {
1743 new_bytenr = 0;
1744 new_ptr_gen = 0;
1745 }
1746
1747 if (new_bytenr > 0 && new_bytenr == old_bytenr) {
1748 WARN_ON(1);
1749 ret = level;
1750 break;
1751 }
1752
1753 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1754 memcmp_node_keys(parent, slot, path, level)) {
1755 if (level <= lowest_level) {
1756 ret = 0;
1757 break;
1758 }
1759
1760 eb = read_tree_block(dest, old_bytenr, blocksize,
1761 old_ptr_gen);
1762 BUG_ON(!eb);
1763 btrfs_tree_lock(eb);
1764 if (cow) {
1765 ret = btrfs_cow_block(trans, dest, eb, parent,
1766 slot, &eb);
1767 BUG_ON(ret);
1768 }
1769 btrfs_set_lock_blocking(eb);
1770
1771 btrfs_tree_unlock(parent);
1772 free_extent_buffer(parent);
1773
1774 parent = eb;
1775 continue;
1776 }
1777
1778 if (!cow) {
1779 btrfs_tree_unlock(parent);
1780 free_extent_buffer(parent);
1781 cow = 1;
1782 goto again;
1783 }
1784
1785 btrfs_node_key_to_cpu(path->nodes[level], &key,
1786 path->slots[level]);
1787 btrfs_release_path(path);
1788
1789 path->lowest_level = level;
1790 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1791 path->lowest_level = 0;
1792 BUG_ON(ret);
1793
1794 /*
1795 * swap blocks in fs tree and reloc tree.
1796 */
1797 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1798 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1799 btrfs_mark_buffer_dirty(parent);
1800
1801 btrfs_set_node_blockptr(path->nodes[level],
1802 path->slots[level], old_bytenr);
1803 btrfs_set_node_ptr_generation(path->nodes[level],
1804 path->slots[level], old_ptr_gen);
1805 btrfs_mark_buffer_dirty(path->nodes[level]);
1806
1807 ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
1808 path->nodes[level]->start,
1809 src->root_key.objectid, level - 1, 0,
1810 1);
1811 BUG_ON(ret);
1812 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
1813 0, dest->root_key.objectid, level - 1,
1814 0, 1);
1815 BUG_ON(ret);
1816
1817 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
1818 path->nodes[level]->start,
1819 src->root_key.objectid, level - 1, 0,
1820 1);
1821 BUG_ON(ret);
1822
1823 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
1824 0, dest->root_key.objectid, level - 1,
1825 0, 1);
1826 BUG_ON(ret);
1827
1828 btrfs_unlock_up_safe(path, 0);
1829
1830 ret = level;
1831 break;
1832 }
1833 btrfs_tree_unlock(parent);
1834 free_extent_buffer(parent);
1835 return ret;
1836}
1837
1838/*
1839 * helper to find next relocated block in reloc tree
1840 */
1841static noinline_for_stack
1842int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1843 int *level)
1844{
1845 struct extent_buffer *eb;
1846 int i;
1847 u64 last_snapshot;
1848 u32 nritems;
1849
1850 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1851
1852 for (i = 0; i < *level; i++) {
1853 free_extent_buffer(path->nodes[i]);
1854 path->nodes[i] = NULL;
1855 }
1856
1857 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1858 eb = path->nodes[i];
1859 nritems = btrfs_header_nritems(eb);
1860 while (path->slots[i] + 1 < nritems) {
1861 path->slots[i]++;
1862 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1863 last_snapshot)
1864 continue;
1865
1866 *level = i;
1867 return 0;
1868 }
1869 free_extent_buffer(path->nodes[i]);
1870 path->nodes[i] = NULL;
1871 }
1872 return 1;
1873}
1874
1875/*
1876 * walk down reloc tree to find relocated block of lowest level
1877 */
1878static noinline_for_stack
1879int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1880 int *level)
1881{
1882 struct extent_buffer *eb = NULL;
1883 int i;
1884 u64 bytenr;
1885 u64 ptr_gen = 0;
1886 u64 last_snapshot;
1887 u32 blocksize;
1888 u32 nritems;
1889
1890 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1891
1892 for (i = *level; i > 0; i--) {
1893 eb = path->nodes[i];
1894 nritems = btrfs_header_nritems(eb);
1895 while (path->slots[i] < nritems) {
1896 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1897 if (ptr_gen > last_snapshot)
1898 break;
1899 path->slots[i]++;
1900 }
1901 if (path->slots[i] >= nritems) {
1902 if (i == *level)
1903 break;
1904 *level = i + 1;
1905 return 0;
1906 }
1907 if (i == 1) {
1908 *level = i;
1909 return 0;
1910 }
1911
1912 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
1913 blocksize = btrfs_level_size(root, i - 1);
1914 eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
1915 BUG_ON(btrfs_header_level(eb) != i - 1);
1916 path->nodes[i - 1] = eb;
1917 path->slots[i - 1] = 0;
1918 }
1919 return 1;
1920}
1921
/*
 * invalidate the extent cache for file extents whose key is in the range
 * [min_key, max_key)
 */
1926static int invalidate_extent_cache(struct btrfs_root *root,
1927 struct btrfs_key *min_key,
1928 struct btrfs_key *max_key)
1929{
1930 struct inode *inode = NULL;
1931 u64 objectid;
1932 u64 start, end;
1933 u64 ino;
1934
1935 objectid = min_key->objectid;
1936 while (1) {
1937 cond_resched();
1938 iput(inode);
1939
1940 if (objectid > max_key->objectid)
1941 break;
1942
1943 inode = find_next_inode(root, objectid);
1944 if (!inode)
1945 break;
1946 ino = btrfs_ino(inode);
1947
1948 if (ino > max_key->objectid) {
1949 iput(inode);
1950 break;
1951 }
1952
1953 objectid = ino + 1;
1954 if (!S_ISREG(inode->i_mode))
1955 continue;
1956
1957 if (unlikely(min_key->objectid == ino)) {
1958 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1959 continue;
1960 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1961 start = 0;
1962 else {
1963 start = min_key->offset;
1964 WARN_ON(!IS_ALIGNED(start, root->sectorsize));
1965 }
1966 } else {
1967 start = 0;
1968 }
1969
1970 if (unlikely(max_key->objectid == ino)) {
1971 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1972 continue;
1973 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1974 end = (u64)-1;
1975 } else {
1976 if (max_key->offset == 0)
1977 continue;
1978 end = max_key->offset;
1979 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1980 end--;
1981 }
1982 } else {
1983 end = (u64)-1;
1984 }
1985
1986 /* the lock_extent waits for readpage to complete */
1987 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
1988 btrfs_drop_extent_cache(inode, start, end, 1);
1989 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1990 }
1991 return 0;
1992}
1993
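/*
 * find the next key at or above 'level' in the path. returns 0 and
 * fills in *key if there is one, 1 if the path is exhausted.
 */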
1994static int find_next_key(struct btrfs_path *path, int level,
1995 struct btrfs_key *key)
1996
1997{
1998 while (level < BTRFS_MAX_LEVEL) {
1999 if (!path->nodes[level])
2000 break;
2001 if (path->slots[level] + 1 <
2002 btrfs_header_nritems(path->nodes[level])) {
2003 btrfs_node_key_to_cpu(path->nodes[level], key,
2004 path->slots[level] + 1);
2005 return 0;
2006 }
2007 level++;
2008 }
2009 return 1;
2010}
2011
2012/*
2013 * merge the relocated tree blocks in reloc tree with corresponding
2014 * fs tree.
2015 */
2016static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2017 struct btrfs_root *root)
2018{
2019 LIST_HEAD(inode_list);
2020 struct btrfs_key key;
2021 struct btrfs_key next_key;
2022 struct btrfs_trans_handle *trans;
2023 struct btrfs_root *reloc_root;
2024 struct btrfs_root_item *root_item;
2025 struct btrfs_path *path;
2026 struct extent_buffer *leaf;
2027 unsigned long nr;
2028 int level;
2029 int max_level;
2030 int replaced = 0;
2031 int ret;
2032 int err = 0;
2033 u32 min_reserved;
2034
2035 path = btrfs_alloc_path();
2036 if (!path)
2037 return -ENOMEM;
2038 path->reada = 1;
2039
2040 reloc_root = root->reloc_root;
2041 root_item = &reloc_root->root_item;
2042
2043 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2044 level = btrfs_root_level(root_item);
2045 extent_buffer_get(reloc_root->node);
2046 path->nodes[level] = reloc_root->node;
2047 path->slots[level] = 0;
2048 } else {
2049 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2050
2051 level = root_item->drop_level;
2052 BUG_ON(level == 0);
2053 path->lowest_level = level;
2054 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2055 path->lowest_level = 0;
2056 if (ret < 0) {
2057 btrfs_free_path(path);
2058 return ret;
2059 }
2060
2061 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2062 path->slots[level]);
2063 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2064
2065 btrfs_unlock_up_safe(path, 0);
2066 }
2067
2068 min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2069 memset(&next_key, 0, sizeof(next_key));
2070
2071 while (1) {
2072 trans = btrfs_start_transaction(root, 0);
2073 BUG_ON(IS_ERR(trans));
2074 trans->block_rsv = rc->block_rsv;
2075
2076 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved);
2077 if (ret) {
2078 BUG_ON(ret != -EAGAIN);
2079 ret = btrfs_commit_transaction(trans, root);
2080 BUG_ON(ret);
2081 continue;
2082 }
2083
2084 replaced = 0;
2085 max_level = level;
2086
2087 ret = walk_down_reloc_tree(reloc_root, path, &level);
2088 if (ret < 0) {
2089 err = ret;
2090 goto out;
2091 }
2092 if (ret > 0)
2093 break;
2094
2095 if (!find_next_key(path, level, &key) &&
2096 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2097 ret = 0;
2098 } else {
2099 ret = replace_path(trans, root, reloc_root, path,
2100 &next_key, level, max_level);
2101 }
2102 if (ret < 0) {
2103 err = ret;
2104 goto out;
2105 }
2106
2107 if (ret > 0) {
2108 level = ret;
2109 btrfs_node_key_to_cpu(path->nodes[level], &key,
2110 path->slots[level]);
2111 replaced = 1;
2112 }
2113
2114 ret = walk_up_reloc_tree(reloc_root, path, &level);
2115 if (ret > 0)
2116 break;
2117
2118 BUG_ON(level == 0);
2119 /*
2120 * save the merging progress in the drop_progress.
2121 * this is OK since root refs == 1 in this case.
2122 */
2123 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2124 path->slots[level]);
2125 root_item->drop_level = level;
2126
2127 nr = trans->blocks_used;
2128 btrfs_end_transaction_throttle(trans, root);
2129
2130 btrfs_btree_balance_dirty(root, nr);
2131
2132 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2133 invalidate_extent_cache(root, &key, &next_key);
2134 }
2135
2136 /*
2137	 * handle the case where only one block in the fs tree needs to be
2138	 * relocated and the block is the tree root.
2139 */
2140 leaf = btrfs_lock_root_node(root);
2141 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2142 btrfs_tree_unlock(leaf);
2143 free_extent_buffer(leaf);
2144 if (ret < 0)
2145 err = ret;
2146out:
2147 btrfs_free_path(path);
2148
2149 if (err == 0) {
2150 memset(&root_item->drop_progress, 0,
2151 sizeof(root_item->drop_progress));
2152 root_item->drop_level = 0;
2153 btrfs_set_root_refs(root_item, 0);
2154 btrfs_update_reloc_root(trans, root);
2155 }
2156
2157 nr = trans->blocks_used;
2158 btrfs_end_transaction_throttle(trans, root);
2159
2160 btrfs_btree_balance_dirty(root, nr);
2161
2162 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2163 invalidate_extent_cache(root, &key, &next_key);
2164
2165 return err;
2166}
2167
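/*
 * reserve the space needed for merging the reloc trees and set the
 * root refs of the reloc trees to 1 so that btrfs_recover_relocation
 * can resume the merge after a crash.
 */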
2168static noinline_for_stack
2169int prepare_to_merge(struct reloc_control *rc, int err)
2170{
2171 struct btrfs_root *root = rc->extent_root;
2172 struct btrfs_root *reloc_root;
2173 struct btrfs_trans_handle *trans;
2174 LIST_HEAD(reloc_roots);
2175 u64 num_bytes = 0;
2176 int ret;
2177
2178 mutex_lock(&root->fs_info->reloc_mutex);
2179 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2180 rc->merging_rsv_size += rc->nodes_relocated * 2;
2181 mutex_unlock(&root->fs_info->reloc_mutex);
2182
2183again:
2184 if (!err) {
2185 num_bytes = rc->merging_rsv_size;
2186 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
2187 if (ret)
2188 err = ret;
2189 }
2190
2191 trans = btrfs_join_transaction(rc->extent_root);
2192 if (IS_ERR(trans)) {
2193 if (!err)
2194 btrfs_block_rsv_release(rc->extent_root,
2195 rc->block_rsv, num_bytes);
2196 return PTR_ERR(trans);
2197 }
2198
2199 if (!err) {
2200 if (num_bytes != rc->merging_rsv_size) {
2201 btrfs_end_transaction(trans, rc->extent_root);
2202 btrfs_block_rsv_release(rc->extent_root,
2203 rc->block_rsv, num_bytes);
2204 goto again;
2205 }
2206 }
2207
2208 rc->merge_reloc_tree = 1;
2209
2210 while (!list_empty(&rc->reloc_roots)) {
2211 reloc_root = list_entry(rc->reloc_roots.next,
2212 struct btrfs_root, root_list);
2213 list_del_init(&reloc_root->root_list);
2214
2215 root = read_fs_root(reloc_root->fs_info,
2216 reloc_root->root_key.offset);
2217 BUG_ON(IS_ERR(root));
2218 BUG_ON(root->reloc_root != reloc_root);
2219
2220 /*
2221 * set reference count to 1, so btrfs_recover_relocation
2222		 * knows it should resume merging
2223 */
2224 if (!err)
2225 btrfs_set_root_refs(&reloc_root->root_item, 1);
2226 btrfs_update_reloc_root(trans, root);
2227
2228 list_add(&reloc_root->root_list, &reloc_roots);
2229 }
2230
2231 list_splice(&reloc_roots, &rc->reloc_roots);
2232
2233 if (!err)
2234 btrfs_commit_transaction(trans, rc->extent_root);
2235 else
2236 btrfs_end_transaction(trans, rc->extent_root);
2237 return err;
2238}
2239
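/*
 * merge each reloc tree with its corresponding fs tree, then drop
 * the reloc trees.  reloc trees whose root refs are zero are dropped
 * without merging.
 */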
2240static noinline_for_stack
2241int merge_reloc_roots(struct reloc_control *rc)
2242{
2243 struct btrfs_root *root;
2244 struct btrfs_root *reloc_root;
2245 LIST_HEAD(reloc_roots);
2246 int found = 0;
2247 int ret;
2248again:
2249 root = rc->extent_root;
2250
2251 /*
2252	 * this serializes us with btrfs_record_root_in_transaction;
2253 * we have to make sure nobody is in the middle of
2254 * adding their roots to the list while we are
2255 * doing this splice
2256 */
2257 mutex_lock(&root->fs_info->reloc_mutex);
2258 list_splice_init(&rc->reloc_roots, &reloc_roots);
2259 mutex_unlock(&root->fs_info->reloc_mutex);
2260
2261 while (!list_empty(&reloc_roots)) {
2262 found = 1;
2263 reloc_root = list_entry(reloc_roots.next,
2264 struct btrfs_root, root_list);
2265
2266 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2267 root = read_fs_root(reloc_root->fs_info,
2268 reloc_root->root_key.offset);
2269 BUG_ON(IS_ERR(root));
2270 BUG_ON(root->reloc_root != reloc_root);
2271
2272 ret = merge_reloc_root(rc, root);
2273 BUG_ON(ret);
2274 } else {
2275 list_del_init(&reloc_root->root_list);
2276 }
2277 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
2278 BUG_ON(ret < 0);
2279 }
2280
2281 if (found) {
2282 found = 0;
2283 goto again;
2284 }
2285 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2286 return 0;
2287}
2288
2289static void free_block_list(struct rb_root *blocks)
2290{
2291 struct tree_block *block;
2292 struct rb_node *rb_node;
2293 while ((rb_node = rb_first(blocks))) {
2294 block = rb_entry(rb_node, struct tree_block, rb_node);
2295 rb_erase(rb_node, blocks);
2296 kfree(block);
2297 }
2298}
2299
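/*
 * record the fs tree that owns 'reloc_root' in the current
 * transaction.  this is a no-op if the reloc tree has already been
 * updated in this transaction.
 */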
2300static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2301 struct btrfs_root *reloc_root)
2302{
2303 struct btrfs_root *root;
2304
2305 if (reloc_root->last_trans == trans->transid)
2306 return 0;
2307
2308 root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
2309 BUG_ON(IS_ERR(root));
2310 BUG_ON(root->reloc_root != reloc_root);
2311
2312 return btrfs_record_root_in_trans(trans, root);
2313}
2314
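/*
 * walk up the backref cache from 'node' to find the reloc root that
 * the new copy of the block should belong to.  the backref nodes
 * visited on the way up are saved in rc->backref_cache.path for
 * btrfs_reloc_cow_block.  returns NULL if no usable root is found.
 */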
2315static noinline_for_stack
2316struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2317 struct reloc_control *rc,
2318 struct backref_node *node,
2319 struct backref_edge *edges[], int *nr)
2320{
2321 struct backref_node *next;
2322 struct btrfs_root *root;
2323 int index = 0;
2324
2325 next = node;
2326 while (1) {
2327 cond_resched();
2328 next = walk_up_backref(next, edges, &index);
2329 root = next->root;
2330 BUG_ON(!root);
2331 BUG_ON(!root->ref_cows);
2332
2333 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2334 record_reloc_root_in_trans(trans, root);
2335 break;
2336 }
2337
2338 btrfs_record_root_in_trans(trans, root);
2339 root = root->reloc_root;
2340
2341 if (next->new_bytenr != root->node->start) {
2342 BUG_ON(next->new_bytenr);
2343 BUG_ON(!list_empty(&next->list));
2344 next->new_bytenr = root->node->start;
2345 next->root = root;
2346 list_add_tail(&next->list,
2347 &rc->backref_cache.changed);
2348 __mark_block_processed(rc, next);
2349 break;
2350 }
2351
2352 WARN_ON(1);
2353 root = NULL;
2354 next = walk_down_backref(edges, &index);
2355 if (!next || next->level <= node->level)
2356 break;
2357 }
2358 if (!root)
2359 return NULL;
2360
2361 *nr = index;
2362 next = node;
2363 /* setup backref node path for btrfs_reloc_cow_block */
2364 while (1) {
2365 rc->backref_cache.path[next->level] = next;
2366 if (--index < 0)
2367 break;
2368 next = edges[index]->node[UPPER];
2369 }
2370 return root;
2371}
2372
2373/*
2374 * select a tree root for relocation. return NULL if the block
2375 * is reference counted. we should use do_relocation() in this
2376 * case. return a tree root pointer if the block isn't reference
2377 * counted. return -ENOENT if the block is the root of a reloc tree.
2378 */
2379static noinline_for_stack
2380struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
2381 struct backref_node *node)
2382{
2383 struct backref_node *next;
2384 struct btrfs_root *root;
2385 struct btrfs_root *fs_root = NULL;
2386 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2387 int index = 0;
2388
2389 next = node;
2390 while (1) {
2391 cond_resched();
2392 next = walk_up_backref(next, edges, &index);
2393 root = next->root;
2394 BUG_ON(!root);
2395
2396		/* no other choice for non-reference counted tree */
2397 if (!root->ref_cows)
2398 return root;
2399
2400 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2401 fs_root = root;
2402
2403 if (next != node)
2404 return NULL;
2405
2406 next = walk_down_backref(edges, &index);
2407 if (!next || next->level <= node->level)
2408 break;
2409 }
2410
2411 if (!fs_root)
2412 return ERR_PTR(-ENOENT);
2413 return fs_root;
2414}
2415
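/*
 * calculate the total size of the tree blocks that may be COWed while
 * relocating 'node', i.e. the node itself plus all unprocessed upper
 * level blocks that reference it.  used for sizing the relocation
 * block reserve.
 */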
2416static noinline_for_stack
2417u64 calcu_metadata_size(struct reloc_control *rc,
2418 struct backref_node *node, int reserve)
2419{
2420 struct backref_node *next = node;
2421 struct backref_edge *edge;
2422 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2423 u64 num_bytes = 0;
2424 int index = 0;
2425
2426 BUG_ON(reserve && node->processed);
2427
2428 while (next) {
2429 cond_resched();
2430 while (1) {
2431 if (next->processed && (reserve || next != node))
2432 break;
2433
2434 num_bytes += btrfs_level_size(rc->extent_root,
2435 next->level);
2436
2437 if (list_empty(&next->upper))
2438 break;
2439
2440 edge = list_entry(next->upper.next,
2441 struct backref_edge, list[LOWER]);
2442 edges[index++] = edge;
2443 next = edge->node[UPPER];
2444 }
2445 next = walk_down_backref(edges, &index);
2446 }
2447 return num_bytes;
2448}
2449
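/*
 * reserve space in the relocation block reserve for COWing 'node' and
 * the upper level blocks that reference it.  if the reservation fails
 * with -EAGAIN, the transaction is flagged for commit before the
 * error is returned.
 */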
2450static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2451 struct reloc_control *rc,
2452 struct backref_node *node)
2453{
2454 struct btrfs_root *root = rc->extent_root;
2455 u64 num_bytes;
2456 int ret;
2457
2458 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2459
2460 trans->block_rsv = rc->block_rsv;
2461 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
2462 if (ret) {
2463 if (ret == -EAGAIN)
2464 rc->commit_transaction = 1;
2465 return ret;
2466 }
2467
2468 return 0;
2469}
2470
2471static void release_metadata_space(struct reloc_control *rc,
2472 struct backref_node *node)
2473{
2474 u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
2475 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
2476}
2477
2478/*
2479 * relocate a tree block, and then update pointers in upper level
2480 * blocks that reference the block to point to the new location.
2481 *
2482 * if called by link_to_upper, the block has already been relocated.
2483 * in that case this function just updates pointers.
2484 */
2485static int do_relocation(struct btrfs_trans_handle *trans,
2486 struct reloc_control *rc,
2487 struct backref_node *node,
2488 struct btrfs_key *key,
2489 struct btrfs_path *path, int lowest)
2490{
2491 struct backref_node *upper;
2492 struct backref_edge *edge;
2493 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2494 struct btrfs_root *root;
2495 struct extent_buffer *eb;
2496 u32 blocksize;
2497 u64 bytenr;
2498 u64 generation;
2499 int nr;
2500 int slot;
2501 int ret;
2502 int err = 0;
2503
2504 BUG_ON(lowest && node->eb);
2505
2506 path->lowest_level = node->level + 1;
2507 rc->backref_cache.path[node->level] = node;
2508 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2509 cond_resched();
2510
2511 upper = edge->node[UPPER];
2512 root = select_reloc_root(trans, rc, upper, edges, &nr);
2513 BUG_ON(!root);
2514
2515 if (upper->eb && !upper->locked) {
2516 if (!lowest) {
2517 ret = btrfs_bin_search(upper->eb, key,
2518 upper->level, &slot);
2519 BUG_ON(ret);
2520 bytenr = btrfs_node_blockptr(upper->eb, slot);
2521 if (node->eb->start == bytenr)
2522 goto next;
2523 }
2524 drop_node_buffer(upper);
2525 }
2526
2527 if (!upper->eb) {
2528 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2529 if (ret < 0) {
2530 err = ret;
2531 break;
2532 }
2533 BUG_ON(ret > 0);
2534
2535 if (!upper->eb) {
2536 upper->eb = path->nodes[upper->level];
2537 path->nodes[upper->level] = NULL;
2538 } else {
2539 BUG_ON(upper->eb != path->nodes[upper->level]);
2540 }
2541
2542 upper->locked = 1;
2543 path->locks[upper->level] = 0;
2544
2545 slot = path->slots[upper->level];
2546 btrfs_release_path(path);
2547 } else {
2548 ret = btrfs_bin_search(upper->eb, key, upper->level,
2549 &slot);
2550 BUG_ON(ret);
2551 }
2552
2553 bytenr = btrfs_node_blockptr(upper->eb, slot);
2554 if (lowest) {
2555 BUG_ON(bytenr != node->bytenr);
2556 } else {
2557 if (node->eb->start == bytenr)
2558 goto next;
2559 }
2560
2561 blocksize = btrfs_level_size(root, node->level);
2562 generation = btrfs_node_ptr_generation(upper->eb, slot);
2563 eb = read_tree_block(root, bytenr, blocksize, generation);
2564 if (!eb) {
2565 err = -EIO;
2566 goto next;
2567 }
2568 btrfs_tree_lock(eb);
2569 btrfs_set_lock_blocking(eb);
2570
2571 if (!node->eb) {
2572 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2573 slot, &eb);
2574 btrfs_tree_unlock(eb);
2575 free_extent_buffer(eb);
2576 if (ret < 0) {
2577 err = ret;
2578 goto next;
2579 }
2580 BUG_ON(node->eb != eb);
2581 } else {
2582 btrfs_set_node_blockptr(upper->eb, slot,
2583 node->eb->start);
2584 btrfs_set_node_ptr_generation(upper->eb, slot,
2585 trans->transid);
2586 btrfs_mark_buffer_dirty(upper->eb);
2587
2588 ret = btrfs_inc_extent_ref(trans, root,
2589 node->eb->start, blocksize,
2590 upper->eb->start,
2591 btrfs_header_owner(upper->eb),
2592 node->level, 0, 1);
2593 BUG_ON(ret);
2594
2595 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2596 BUG_ON(ret);
2597 }
2598next:
2599 if (!upper->pending)
2600 drop_node_buffer(upper);
2601 else
2602 unlock_node_buffer(upper);
2603 if (err)
2604 break;
2605 }
2606
2607 if (!err && node->pending) {
2608 drop_node_buffer(node);
2609 list_move_tail(&node->list, &rc->backref_cache.changed);
2610 node->pending = 0;
2611 }
2612
2613 path->lowest_level = 0;
2614 BUG_ON(err == -ENOSPC);
2615 return err;
2616}
2617
2618static int link_to_upper(struct btrfs_trans_handle *trans,
2619 struct reloc_control *rc,
2620 struct backref_node *node,
2621 struct btrfs_path *path)
2622{
2623 struct btrfs_key key;
2624
2625 btrfs_node_key_to_cpu(node->eb, &key, 0);
2626 return do_relocation(trans, rc, node, &key, path, 0);
2627}
2628
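/*
 * walk the pending lists of the backref cache and update the block
 * pointers in upper level blocks to point at the new copies of the
 * pending blocks.
 */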
2629static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2630 struct reloc_control *rc,
2631 struct btrfs_path *path, int err)
2632{
2633 LIST_HEAD(list);
2634 struct backref_cache *cache = &rc->backref_cache;
2635 struct backref_node *node;
2636 int level;
2637 int ret;
2638
2639 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2640 while (!list_empty(&cache->pending[level])) {
2641 node = list_entry(cache->pending[level].next,
2642 struct backref_node, list);
2643 list_move_tail(&node->list, &list);
2644 BUG_ON(!node->pending);
2645
2646 if (!err) {
2647 ret = link_to_upper(trans, rc, node, path);
2648 if (ret < 0)
2649 err = ret;
2650 }
2651 }
2652 list_splice_init(&list, &cache->pending[level]);
2653 }
2654 return err;
2655}
2656
2657static void mark_block_processed(struct reloc_control *rc,
2658 u64 bytenr, u32 blocksize)
2659{
2660 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2661 EXTENT_DIRTY, GFP_NOFS);
2662}
2663
2664static void __mark_block_processed(struct reloc_control *rc,
2665 struct backref_node *node)
2666{
2667 u32 blocksize;
2668 if (node->level == 0 ||
2669 in_block_group(node->bytenr, rc->block_group)) {
2670 blocksize = btrfs_level_size(rc->extent_root, node->level);
2671 mark_block_processed(rc, node->bytenr, blocksize);
2672 }
2673 node->processed = 1;
2674}
2675
2676/*
2677 * mark a block and all blocks that directly/indirectly reference it
2678 * as processed.
2679 */
2680static void update_processed_blocks(struct reloc_control *rc,
2681 struct backref_node *node)
2682{
2683 struct backref_node *next = node;
2684 struct backref_edge *edge;
2685 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2686 int index = 0;
2687
2688 while (next) {
2689 cond_resched();
2690 while (1) {
2691 if (next->processed)
2692 break;
2693
2694 __mark_block_processed(rc, next);
2695
2696 if (list_empty(&next->upper))
2697 break;
2698
2699 edge = list_entry(next->upper.next,
2700 struct backref_edge, list[LOWER]);
2701 edges[index++] = edge;
2702 next = edge->node[UPPER];
2703 }
2704 next = walk_down_backref(edges, &index);
2705 }
2706}
2707
2708static int tree_block_processed(u64 bytenr, u32 blocksize,
2709 struct reloc_control *rc)
2710{
2711 if (test_range_bit(&rc->processed_blocks, bytenr,
2712 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2713 return 1;
2714 return 0;
2715}
2716
2717static int get_tree_block_key(struct reloc_control *rc,
2718 struct tree_block *block)
2719{
2720 struct extent_buffer *eb;
2721
2722 BUG_ON(block->key_ready);
2723 eb = read_tree_block(rc->extent_root, block->bytenr,
2724 block->key.objectid, block->key.offset);
2725 BUG_ON(!eb);
2726 WARN_ON(btrfs_header_level(eb) != block->level);
2727 if (block->level == 0)
2728 btrfs_item_key_to_cpu(eb, &block->key, 0);
2729 else
2730 btrfs_node_key_to_cpu(eb, &block->key, 0);
2731 free_extent_buffer(eb);
2732 block->key_ready = 1;
2733 return 0;
2734}
2735
2736static int reada_tree_block(struct reloc_control *rc,
2737 struct tree_block *block)
2738{
2739 BUG_ON(block->key_ready);
2740 readahead_tree_block(rc->extent_root, block->bytenr,
2741 block->key.objectid, block->key.offset);
2742 return 0;
2743}
2744
2745/*
2746 * helper function to relocate a tree block
2747 */
2748static int relocate_tree_block(struct btrfs_trans_handle *trans,
2749 struct reloc_control *rc,
2750 struct backref_node *node,
2751 struct btrfs_key *key,
2752 struct btrfs_path *path)
2753{
2754 struct btrfs_root *root;
2755 int release = 0;
2756 int ret = 0;
2757
2758 if (!node)
2759 return 0;
2760
2761 BUG_ON(node->processed);
2762 root = select_one_root(trans, node);
2763 if (root == ERR_PTR(-ENOENT)) {
2764 update_processed_blocks(rc, node);
2765 goto out;
2766 }
2767
2768 if (!root || root->ref_cows) {
2769 ret = reserve_metadata_space(trans, rc, node);
2770 if (ret)
2771 goto out;
2772 release = 1;
2773 }
2774
2775 if (root) {
2776 if (root->ref_cows) {
2777 BUG_ON(node->new_bytenr);
2778 BUG_ON(!list_empty(&node->list));
2779 btrfs_record_root_in_trans(trans, root);
2780 root = root->reloc_root;
2781 node->new_bytenr = root->node->start;
2782 node->root = root;
2783 list_add_tail(&node->list, &rc->backref_cache.changed);
2784 } else {
2785 path->lowest_level = node->level;
2786 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2787 btrfs_release_path(path);
2788 if (ret > 0)
2789 ret = 0;
2790 }
2791 if (!ret)
2792 update_processed_blocks(rc, node);
2793 } else {
2794 ret = do_relocation(trans, rc, node, key, path, 1);
2795 }
2796out:
2797 if (ret || node->level == 0 || node->cowonly) {
2798 if (release)
2799 release_metadata_space(rc, node);
2800 remove_backref_node(&rc->backref_cache, node);
2801 }
2802 return ret;
2803}
2804
2805/*
2806 * relocate a list of blocks
2807 */
2808static noinline_for_stack
2809int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2810 struct reloc_control *rc, struct rb_root *blocks)
2811{
2812 struct backref_node *node;
2813 struct btrfs_path *path;
2814 struct tree_block *block;
2815 struct rb_node *rb_node;
2816 int ret;
2817 int err = 0;
2818
2819 path = btrfs_alloc_path();
2820 if (!path)
2821 return -ENOMEM;
2822
2823 rb_node = rb_first(blocks);
2824 while (rb_node) {
2825 block = rb_entry(rb_node, struct tree_block, rb_node);
2826 if (!block->key_ready)
2827 reada_tree_block(rc, block);
2828 rb_node = rb_next(rb_node);
2829 }
2830
2831 rb_node = rb_first(blocks);
2832 while (rb_node) {
2833 block = rb_entry(rb_node, struct tree_block, rb_node);
2834 if (!block->key_ready)
2835 get_tree_block_key(rc, block);
2836 rb_node = rb_next(rb_node);
2837 }
2838
2839 rb_node = rb_first(blocks);
2840 while (rb_node) {
2841 block = rb_entry(rb_node, struct tree_block, rb_node);
2842
2843 node = build_backref_tree(rc, &block->key,
2844 block->level, block->bytenr);
2845 if (IS_ERR(node)) {
2846 err = PTR_ERR(node);
2847 goto out;
2848 }
2849
2850 ret = relocate_tree_block(trans, rc, node, &block->key,
2851 path);
2852 if (ret < 0) {
2853 if (ret != -EAGAIN || rb_node == rb_first(blocks))
2854 err = ret;
2855 goto out;
2856 }
2857 rb_node = rb_next(rb_node);
2858 }
2859out:
2860 free_block_list(blocks);
2861 err = finish_pending_nodes(trans, rc, path, err);
2862
2863 btrfs_free_path(path);
2864 return err;
2865}
2866
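/*
 * preallocate space in the data relocation inode to cover the extent
 * cluster.  each boundary in the cluster gets its own preallocated
 * file extent.
 */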
2867static noinline_for_stack
2868int prealloc_file_extent_cluster(struct inode *inode,
2869 struct file_extent_cluster *cluster)
2870{
2871 u64 alloc_hint = 0;
2872 u64 start;
2873 u64 end;
2874 u64 offset = BTRFS_I(inode)->index_cnt;
2875 u64 num_bytes;
2876 int nr = 0;
2877 int ret = 0;
2878
2879 BUG_ON(cluster->start != cluster->boundary[0]);
2880 mutex_lock(&inode->i_mutex);
2881
2882 ret = btrfs_check_data_free_space(inode, cluster->end +
2883 1 - cluster->start);
2884 if (ret)
2885 goto out;
2886
2887 while (nr < cluster->nr) {
2888 start = cluster->boundary[nr] - offset;
2889 if (nr + 1 < cluster->nr)
2890 end = cluster->boundary[nr + 1] - 1 - offset;
2891 else
2892 end = cluster->end - offset;
2893
2894 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2895 num_bytes = end + 1 - start;
2896 ret = btrfs_prealloc_file_range(inode, 0, start,
2897 num_bytes, num_bytes,
2898 end + 1, &alloc_hint);
2899 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2900 if (ret)
2901 break;
2902 nr++;
2903 }
2904 btrfs_free_reserved_data_space(inode, cluster->end +
2905 1 - cluster->start);
2906out:
2907 mutex_unlock(&inode->i_mutex);
2908 return ret;
2909}
2910
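/*
 * insert a pinned extent map for the file range [start, end] that
 * maps to 'block_start', dropping any cached extent maps that overlap
 * the range.
 */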
2911static noinline_for_stack
2912int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2913 u64 block_start)
2914{
2915 struct btrfs_root *root = BTRFS_I(inode)->root;
2916 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2917 struct extent_map *em;
2918 int ret = 0;
2919
2920 em = alloc_extent_map();
2921 if (!em)
2922 return -ENOMEM;
2923
2924 em->start = start;
2925 em->len = end + 1 - start;
2926 em->block_len = em->len;
2927 em->block_start = block_start;
2928 em->bdev = root->fs_info->fs_devices->latest_bdev;
2929 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2930
2931 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2932 while (1) {
2933 write_lock(&em_tree->lock);
2934 ret = add_extent_mapping(em_tree, em);
2935 write_unlock(&em_tree->lock);
2936 if (ret != -EEXIST) {
2937 free_extent_map(em);
2938 break;
2939 }
2940 btrfs_drop_extent_cache(inode, start, end, 0);
2941 }
2942 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2943 return ret;
2944}
2945
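/*
 * copy the data in an extent cluster into the preallocated space of
 * the data relocation inode.  pages are read in, marked delalloc and
 * dirtied; EXTENT_BOUNDARY bits record the cluster boundaries for
 * writeback.
 */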
2946static int relocate_file_extent_cluster(struct inode *inode,
2947 struct file_extent_cluster *cluster)
2948{
2949 u64 page_start;
2950 u64 page_end;
2951 u64 offset = BTRFS_I(inode)->index_cnt;
2952 unsigned long index;
2953 unsigned long last_index;
2954 struct page *page;
2955 struct file_ra_state *ra;
2956 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2957 int nr = 0;
2958 int ret = 0;
2959
2960 if (!cluster->nr)
2961 return 0;
2962
2963 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2964 if (!ra)
2965 return -ENOMEM;
2966
2967 ret = prealloc_file_extent_cluster(inode, cluster);
2968 if (ret)
2969 goto out;
2970
2971 file_ra_state_init(ra, inode->i_mapping);
2972
2973 ret = setup_extent_mapping(inode, cluster->start - offset,
2974 cluster->end - offset, cluster->start);
2975 if (ret)
2976 goto out;
2977
2978 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
2979 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
2980 while (index <= last_index) {
2981 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
2982 if (ret)
2983 goto out;
2984
2985 page = find_lock_page(inode->i_mapping, index);
2986 if (!page) {
2987 page_cache_sync_readahead(inode->i_mapping,
2988 ra, NULL, index,
2989 last_index + 1 - index);
2990 page = find_or_create_page(inode->i_mapping, index,
2991 mask);
2992 if (!page) {
2993 btrfs_delalloc_release_metadata(inode,
2994 PAGE_CACHE_SIZE);
2995 ret = -ENOMEM;
2996 goto out;
2997 }
2998 }
2999
3000 if (PageReadahead(page)) {
3001 page_cache_async_readahead(inode->i_mapping,
3002 ra, NULL, page, index,
3003 last_index + 1 - index);
3004 }
3005
3006 if (!PageUptodate(page)) {
3007 btrfs_readpage(NULL, page);
3008 lock_page(page);
3009 if (!PageUptodate(page)) {
3010 unlock_page(page);
3011 page_cache_release(page);
3012 btrfs_delalloc_release_metadata(inode,
3013 PAGE_CACHE_SIZE);
3014 ret = -EIO;
3015 goto out;
3016 }
3017 }
3018
3019 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3020 page_end = page_start + PAGE_CACHE_SIZE - 1;
3021
3022 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3023
3024 set_page_extent_mapped(page);
3025
3026 if (nr < cluster->nr &&
3027 page_start + offset == cluster->boundary[nr]) {
3028 set_extent_bits(&BTRFS_I(inode)->io_tree,
3029 page_start, page_end,
3030 EXTENT_BOUNDARY, GFP_NOFS);
3031 nr++;
3032 }
3033
3034 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
3035 set_page_dirty(page);
3036
3037 unlock_extent(&BTRFS_I(inode)->io_tree,
3038 page_start, page_end);
3039 unlock_page(page);
3040 page_cache_release(page);
3041
3042 index++;
3043 balance_dirty_pages_ratelimited(inode->i_mapping);
3044 btrfs_throttle(BTRFS_I(inode)->root);
3045 }
3046 WARN_ON(nr != cluster->nr);
3047out:
3048 kfree(ra);
3049 return ret;
3050}
3051
3052static noinline_for_stack
3053int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3054 struct file_extent_cluster *cluster)
3055{
3056 int ret;
3057
3058 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3059 ret = relocate_file_extent_cluster(inode, cluster);
3060 if (ret)
3061 return ret;
3062 cluster->nr = 0;
3063 }
3064
3065 if (!cluster->nr)
3066 cluster->start = extent_key->objectid;
3067 else
3068 BUG_ON(cluster->nr >= MAX_EXTENTS);
3069 cluster->end = extent_key->objectid + extent_key->offset - 1;
3070 cluster->boundary[cluster->nr] = extent_key->objectid;
3071 cluster->nr++;
3072
3073 if (cluster->nr >= MAX_EXTENTS) {
3074 ret = relocate_file_extent_cluster(inode, cluster);
3075 if (ret)
3076 return ret;
3077 cluster->nr = 0;
3078 }
3079 return 0;
3080}
3081
3082#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3083static int get_ref_objectid_v0(struct reloc_control *rc,
3084 struct btrfs_path *path,
3085 struct btrfs_key *extent_key,
3086 u64 *ref_objectid, int *path_change)
3087{
3088 struct btrfs_key key;
3089 struct extent_buffer *leaf;
3090 struct btrfs_extent_ref_v0 *ref0;
3091 int ret;
3092 int slot;
3093
3094 leaf = path->nodes[0];
3095 slot = path->slots[0];
3096 while (1) {
3097 if (slot >= btrfs_header_nritems(leaf)) {
3098 ret = btrfs_next_leaf(rc->extent_root, path);
3099 if (ret < 0)
3100 return ret;
3101 BUG_ON(ret > 0);
3102 leaf = path->nodes[0];
3103 slot = path->slots[0];
3104 if (path_change)
3105 *path_change = 1;
3106 }
3107 btrfs_item_key_to_cpu(leaf, &key, slot);
3108 if (key.objectid != extent_key->objectid)
3109 return -ENOENT;
3110
3111 if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
3112 slot++;
3113 continue;
3114 }
3115 ref0 = btrfs_item_ptr(leaf, slot,
3116 struct btrfs_extent_ref_v0);
3117 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
3118 break;
3119 }
3120 return 0;
3121}
3122#endif
3123
3124/*
3125 * helper to add a tree block to the list.
3126 * the major work is getting the generation and level of the block
3127 */
3128static int add_tree_block(struct reloc_control *rc,
3129 struct btrfs_key *extent_key,
3130 struct btrfs_path *path,
3131 struct rb_root *blocks)
3132{
3133 struct extent_buffer *eb;
3134 struct btrfs_extent_item *ei;
3135 struct btrfs_tree_block_info *bi;
3136 struct tree_block *block;
3137 struct rb_node *rb_node;
3138 u32 item_size;
3139 int level = -1;
3140 int generation;
3141
3142 eb = path->nodes[0];
3143 item_size = btrfs_item_size_nr(eb, path->slots[0]);
3144
3145 if (item_size >= sizeof(*ei) + sizeof(*bi)) {
3146 ei = btrfs_item_ptr(eb, path->slots[0],
3147 struct btrfs_extent_item);
3148 bi = (struct btrfs_tree_block_info *)(ei + 1);
3149 generation = btrfs_extent_generation(eb, ei);
3150 level = btrfs_tree_block_level(eb, bi);
3151 } else {
3152#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3153 u64 ref_owner;
3154 int ret;
3155
3156 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3157 ret = get_ref_objectid_v0(rc, path, extent_key,
3158 &ref_owner, NULL);
3159 if (ret < 0)
3160 return ret;
3161 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
3162 level = (int)ref_owner;
3163 /* FIXME: get real generation */
3164 generation = 0;
3165#else
3166 BUG();
3167#endif
3168 }
3169
3170 btrfs_release_path(path);
3171
3172 BUG_ON(level == -1);
3173
3174 block = kmalloc(sizeof(*block), GFP_NOFS);
3175 if (!block)
3176 return -ENOMEM;
3177
3178 block->bytenr = extent_key->objectid;
3179 block->key.objectid = extent_key->offset;
3180 block->key.offset = generation;
3181 block->level = level;
3182 block->key_ready = 0;
3183
3184 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3185 if (rb_node)
3186 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3187
3188 return 0;
3189}
3190
3191/*
3192 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3193 */
3194static int __add_tree_block(struct reloc_control *rc,
3195 u64 bytenr, u32 blocksize,
3196 struct rb_root *blocks)
3197{
3198 struct btrfs_path *path;
3199 struct btrfs_key key;
3200 int ret;
3201
3202 if (tree_block_processed(bytenr, blocksize, rc))
3203 return 0;
3204
3205 if (tree_search(blocks, bytenr))
3206 return 0;
3207
3208 path = btrfs_alloc_path();
3209 if (!path)
3210 return -ENOMEM;
3211
3212 key.objectid = bytenr;
3213 key.type = BTRFS_EXTENT_ITEM_KEY;
3214 key.offset = blocksize;
3215
3216 path->search_commit_root = 1;
3217 path->skip_locking = 1;
3218 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3219 if (ret < 0)
3220 goto out;
3221 BUG_ON(ret);
3222
3223 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3224 ret = add_tree_block(rc, &key, path, blocks);
3225out:
3226 btrfs_free_path(path);
3227 return ret;
3228}
3229
3230/*
3231 * helper to check if the block uses full backrefs for pointers in it
3232 */
3233static int block_use_full_backref(struct reloc_control *rc,
3234 struct extent_buffer *eb)
3235{
3236 u64 flags;
3237 int ret;
3238
3239 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
3240 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
3241 return 1;
3242
3243 ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
3244 eb->start, eb->len, NULL, &flags);
3245 BUG_ON(ret);
3246
3247 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3248 ret = 1;
3249 else
3250 ret = 0;
3251 return ret;
3252}
3253
3254static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3255 struct inode *inode, u64 ino)
3256{
3257 struct btrfs_key key;
3258 struct btrfs_path *path;
3259 struct btrfs_root *root = fs_info->tree_root;
3260 struct btrfs_trans_handle *trans;
3261 unsigned long nr;
3262 int ret = 0;
3263
3264 if (inode)
3265 goto truncate;
3266
3267 key.objectid = ino;
3268 key.type = BTRFS_INODE_ITEM_KEY;
3269 key.offset = 0;
3270
3271 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3272 if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
3273 if (inode && !IS_ERR(inode))
3274 iput(inode);
3275 return -ENOENT;
3276 }
3277
3278truncate:
3279 path = btrfs_alloc_path();
3280 if (!path) {
3281 ret = -ENOMEM;
3282 goto out;
3283 }
3284
3285 trans = btrfs_join_transaction(root);
3286 if (IS_ERR(trans)) {
3287 btrfs_free_path(path);
3288 ret = PTR_ERR(trans);
3289 goto out;
3290 }
3291
3292 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
3293
3294 btrfs_free_path(path);
3295 nr = trans->blocks_used;
3296 btrfs_end_transaction(trans, root);
3297 btrfs_btree_balance_dirty(root, nr);
3298out:
3299 iput(inode);
3300 return ret;
3301}
3302
3303/*
3304 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3305 * this function scans the fs tree to find blocks that reference the data extent
3306 */
3307static int find_data_references(struct reloc_control *rc,
3308 struct btrfs_key *extent_key,
3309 struct extent_buffer *leaf,
3310 struct btrfs_extent_data_ref *ref,
3311 struct rb_root *blocks)
3312{
3313 struct btrfs_path *path;
3314 struct tree_block *block;
3315 struct btrfs_root *root;
3316 struct btrfs_file_extent_item *fi;
3317 struct rb_node *rb_node;
3318 struct btrfs_key key;
3319 u64 ref_root;
3320 u64 ref_objectid;
3321 u64 ref_offset;
3322 u32 ref_count;
3323 u32 nritems;
3324 int err = 0;
3325 int added = 0;
3326 int counted;
3327 int ret;
3328
3329 ref_root = btrfs_extent_data_ref_root(leaf, ref);
3330 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
3331 ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
3332 ref_count = btrfs_extent_data_ref_count(leaf, ref);
3333
3334 /*
3335	 * This is an extent belonging to the free space cache, let's just delete
3336 * it and redo the search.
3337 */
3338 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
3339 ret = delete_block_group_cache(rc->extent_root->fs_info,
3340 NULL, ref_objectid);
3341 if (ret != -ENOENT)
3342 return ret;
3343 ret = 0;
3344 }
3345
3346 path = btrfs_alloc_path();
3347 if (!path)
3348 return -ENOMEM;
3349 path->reada = 1;
3350
3351 root = read_fs_root(rc->extent_root->fs_info, ref_root);
3352 if (IS_ERR(root)) {
3353 err = PTR_ERR(root);
3354 goto out;
3355 }
3356
3357 key.objectid = ref_objectid;
3358 key.type = BTRFS_EXTENT_DATA_KEY;
3359 if (ref_offset > ((u64)-1 << 32))
3360 key.offset = 0;
3361 else
3362 key.offset = ref_offset;
3363
3364 path->search_commit_root = 1;
3365 path->skip_locking = 1;
3366 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3367 if (ret < 0) {
3368 err = ret;
3369 goto out;
3370 }
3371
3372 leaf = path->nodes[0];
3373 nritems = btrfs_header_nritems(leaf);
3374 /*
3375	 * references from tree blocks that use full backrefs are not
3376	 * counted against ref_count
3377 */
3378 if (block_use_full_backref(rc, leaf))
3379 counted = 0;
3380 else
3381 counted = 1;
3382 rb_node = tree_search(blocks, leaf->start);
3383 if (rb_node) {
3384 if (counted)
3385 added = 1;
3386 else
3387 path->slots[0] = nritems;
3388 }
3389
3390 while (ref_count > 0) {
3391 while (path->slots[0] >= nritems) {
3392 ret = btrfs_next_leaf(root, path);
3393 if (ret < 0) {
3394 err = ret;
3395 goto out;
3396 }
3397 if (ret > 0) {
3398 WARN_ON(1);
3399 goto out;
3400 }
3401
3402 leaf = path->nodes[0];
3403 nritems = btrfs_header_nritems(leaf);
3404 added = 0;
3405
3406 if (block_use_full_backref(rc, leaf))
3407 counted = 0;
3408 else
3409 counted = 1;
3410 rb_node = tree_search(blocks, leaf->start);
3411 if (rb_node) {
3412 if (counted)
3413 added = 1;
3414 else
3415 path->slots[0] = nritems;
3416 }
3417 }
3418
3419 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3420 if (key.objectid != ref_objectid ||
3421 key.type != BTRFS_EXTENT_DATA_KEY) {
3422 WARN_ON(1);
3423 break;
3424 }
3425
3426 fi = btrfs_item_ptr(leaf, path->slots[0],
3427 struct btrfs_file_extent_item);
3428
3429 if (btrfs_file_extent_type(leaf, fi) ==
3430 BTRFS_FILE_EXTENT_INLINE)
3431 goto next;
3432
3433 if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3434 extent_key->objectid)
3435 goto next;
3436
3437 key.offset -= btrfs_file_extent_offset(leaf, fi);
3438 if (key.offset != ref_offset)
3439 goto next;
3440
3441 if (counted)
3442 ref_count--;
3443 if (added)
3444 goto next;
3445
3446 if (!tree_block_processed(leaf->start, leaf->len, rc)) {
3447 block = kmalloc(sizeof(*block), GFP_NOFS);
3448 if (!block) {
3449 err = -ENOMEM;
3450 break;
3451 }
3452 block->bytenr = leaf->start;
3453 btrfs_item_key_to_cpu(leaf, &block->key, 0);
3454 block->level = 0;
3455 block->key_ready = 1;
3456 rb_node = tree_insert(blocks, block->bytenr,
3457 &block->rb_node);
3458 if (rb_node)
3459 backref_tree_panic(rb_node, -EEXIST,
3460 block->bytenr);
3461 }
3462 if (counted)
3463 added = 1;
3464 else
3465 path->slots[0] = nritems;
3466next:
3467 path->slots[0]++;
3468
3469 }
3470out:
3471 btrfs_free_path(path);
3472 return err;
3473}
3474
3475/*
3476 * helper to find all tree blocks that reference a given data extent
3477 */
3478static noinline_for_stack
3479int add_data_references(struct reloc_control *rc,
3480 struct btrfs_key *extent_key,
3481 struct btrfs_path *path,
3482 struct rb_root *blocks)
3483{
3484 struct btrfs_key key;
3485 struct extent_buffer *eb;
3486 struct btrfs_extent_data_ref *dref;
3487 struct btrfs_extent_inline_ref *iref;
3488 unsigned long ptr;
3489 unsigned long end;
3490 u32 blocksize = btrfs_level_size(rc->extent_root, 0);
3491 int ret;
3492 int err = 0;
3493
3494 eb = path->nodes[0];
3495 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3496 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3497#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3498 if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
3499 ptr = end;
3500 else
3501#endif
3502 ptr += sizeof(struct btrfs_extent_item);
3503
3504 while (ptr < end) {
3505 iref = (struct btrfs_extent_inline_ref *)ptr;
3506 key.type = btrfs_extent_inline_ref_type(eb, iref);
3507 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3508 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3509 ret = __add_tree_block(rc, key.offset, blocksize,
3510 blocks);
3511 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3512 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
3513 ret = find_data_references(rc, extent_key,
3514 eb, dref, blocks);
3515 } else {
3516 BUG();
3517 }
3518 ptr += btrfs_extent_inline_ref_size(key.type);
3519 }
3520 WARN_ON(ptr > end);
3521
3522 while (1) {
3523 cond_resched();
3524 eb = path->nodes[0];
3525 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3526 ret = btrfs_next_leaf(rc->extent_root, path);
3527 if (ret < 0) {
3528 err = ret;
3529 break;
3530 }
3531 if (ret > 0)
3532 break;
3533 eb = path->nodes[0];
3534 }
3535
3536 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3537 if (key.objectid != extent_key->objectid)
3538 break;
3539
3540#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3541 if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
3542 key.type == BTRFS_EXTENT_REF_V0_KEY) {
3543#else
3544 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
3545 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3546#endif
3547 ret = __add_tree_block(rc, key.offset, blocksize,
3548 blocks);
3549 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3550 dref = btrfs_item_ptr(eb, path->slots[0],
3551 struct btrfs_extent_data_ref);
3552 ret = find_data_references(rc, extent_key,
3553 eb, dref, blocks);
3554 } else {
3555 ret = 0;
3556 }
3557 if (ret) {
3558 err = ret;
3559 break;
3560 }
3561 path->slots[0]++;
3562 }
3563 btrfs_release_path(path);
3564 if (err)
3565 free_block_list(blocks);
3566 return err;
3567}
3568
3569/*
3570 * helper to find the next unprocessed extent
3571 */
3572static noinline_for_stack
3573int find_next_extent(struct btrfs_trans_handle *trans,
3574 struct reloc_control *rc, struct btrfs_path *path,
3575 struct btrfs_key *extent_key)
3576{
3577 struct btrfs_key key;
3578 struct extent_buffer *leaf;
3579 u64 start, end, last;
3580 int ret;
3581
3582 last = rc->block_group->key.objectid + rc->block_group->key.offset;
3583 while (1) {
3584 cond_resched();
3585 if (rc->search_start >= last) {
3586 ret = 1;
3587 break;
3588 }
3589
3590 key.objectid = rc->search_start;
3591 key.type = BTRFS_EXTENT_ITEM_KEY;
3592 key.offset = 0;
3593
3594 path->search_commit_root = 1;
3595 path->skip_locking = 1;
3596 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3597 0, 0);
3598 if (ret < 0)
3599 break;
3600next:
3601 leaf = path->nodes[0];
3602 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3603 ret = btrfs_next_leaf(rc->extent_root, path);
3604 if (ret != 0)
3605 break;
3606 leaf = path->nodes[0];
3607 }
3608
3609 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3610 if (key.objectid >= last) {
3611 ret = 1;
3612 break;
3613 }
3614
3615 if (key.type != BTRFS_EXTENT_ITEM_KEY ||
3616 key.objectid + key.offset <= rc->search_start) {
3617 path->slots[0]++;
3618 goto next;
3619 }
3620
3621 ret = find_first_extent_bit(&rc->processed_blocks,
3622 key.objectid, &start, &end,
3623 EXTENT_DIRTY);
3624
3625 if (ret == 0 && start <= key.objectid) {
3626 btrfs_release_path(path);
3627 rc->search_start = end + 1;
3628 } else {
3629 rc->search_start = key.objectid + key.offset;
3630 memcpy(extent_key, &key, sizeof(key));
3631 return 0;
3632 }
3633 }
3634 btrfs_release_path(path);
3635 return ret;
3636}
3637
3638static void set_reloc_control(struct reloc_control *rc)
3639{
3640 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3641
3642 mutex_lock(&fs_info->reloc_mutex);
3643 fs_info->reloc_ctl = rc;
3644 mutex_unlock(&fs_info->reloc_mutex);
3645}
3646
3647static void unset_reloc_control(struct reloc_control *rc)
3648{
3649 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3650
3651 mutex_lock(&fs_info->reloc_mutex);
3652 fs_info->reloc_ctl = NULL;
3653 mutex_unlock(&fs_info->reloc_mutex);
3654}
3655
3656static int check_extent_flags(u64 flags)
3657{
3658 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3659 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3660 return 1;
3661 if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3662 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3663 return 1;
3664 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3665 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3666 return 1;
3667 return 0;
3668}
3669
3670static noinline_for_stack
3671int prepare_to_relocate(struct reloc_control *rc)
3672{
3673 struct btrfs_trans_handle *trans;
3674 int ret;
3675
3676 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root);
3677 if (!rc->block_rsv)
3678 return -ENOMEM;
3679
3680 /*
3681 * reserve some space for creating reloc trees.
3682 * btrfs_init_reloc_root will use them when there
3683	 * is no reservation in the transaction handle.
3684 */
3685 ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
3686 rc->extent_root->nodesize * 256);
3687 if (ret)
3688 return ret;
3689
3690 memset(&rc->cluster, 0, sizeof(rc->cluster));
3691 rc->search_start = rc->block_group->key.objectid;
3692 rc->extents_found = 0;
3693 rc->nodes_relocated = 0;
3694 rc->merging_rsv_size = 0;
3695
3696 rc->create_reloc_tree = 1;
3697 set_reloc_control(rc);
3698
3699 trans = btrfs_join_transaction(rc->extent_root);
3700 BUG_ON(IS_ERR(trans));
3701 btrfs_commit_transaction(trans, rc->extent_root);
3702 return 0;
3703}
3704
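/*
 * main loop of the relocation.  find each extent in the block group,
 * relocate the tree blocks or data references for it, then merge the
 * reloc trees back into the fs trees.
 */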
3705static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3706{
3707 struct rb_root blocks = RB_ROOT;
3708 struct btrfs_key key;
3709 struct btrfs_trans_handle *trans = NULL;
3710 struct btrfs_path *path;
3711 struct btrfs_extent_item *ei;
3712 unsigned long nr;
3713 u64 flags;
3714 u32 item_size;
3715 int ret;
3716 int err = 0;
3717 int progress = 0;
3718
3719 path = btrfs_alloc_path();
3720 if (!path)
3721 return -ENOMEM;
3722 path->reada = 1;
3723
3724 ret = prepare_to_relocate(rc);
3725 if (ret) {
3726 err = ret;
3727 goto out_free;
3728 }
3729
3730 while (1) {
3731 progress++;
3732 trans = btrfs_start_transaction(rc->extent_root, 0);
3733 BUG_ON(IS_ERR(trans));
3734restart:
3735 if (update_backref_cache(trans, &rc->backref_cache)) {
3736 btrfs_end_transaction(trans, rc->extent_root);
3737 continue;
3738 }
3739
3740 ret = find_next_extent(trans, rc, path, &key);
3741 if (ret < 0)
3742 err = ret;
3743 if (ret != 0)
3744 break;
3745
3746 rc->extents_found++;
3747
3748 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3749 struct btrfs_extent_item);
3750 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
3751 if (item_size >= sizeof(*ei)) {
3752 flags = btrfs_extent_flags(path->nodes[0], ei);
3753 ret = check_extent_flags(flags);
3754 BUG_ON(ret);
3755
3756 } else {
3757#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3758 u64 ref_owner;
3759 int path_change = 0;
3760
3761 BUG_ON(item_size !=
3762 sizeof(struct btrfs_extent_item_v0));
3763 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
3764 &path_change);
3765 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
3766 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
3767 else
3768 flags = BTRFS_EXTENT_FLAG_DATA;
3769
3770 if (path_change) {
3771 btrfs_release_path(path);
3772
3773 path->search_commit_root = 1;
3774 path->skip_locking = 1;
3775 ret = btrfs_search_slot(NULL, rc->extent_root,
3776 &key, path, 0, 0);
3777 if (ret < 0) {
3778 err = ret;
3779 break;
3780 }
3781 BUG_ON(ret > 0);
3782 }
3783#else
3784 BUG();
3785#endif
3786 }
3787
3788 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3789 ret = add_tree_block(rc, &key, path, &blocks);
3790 } else if (rc->stage == UPDATE_DATA_PTRS &&
3791 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3792 ret = add_data_references(rc, &key, path, &blocks);
3793 } else {
3794 btrfs_release_path(path);
3795 ret = 0;
3796 }
3797 if (ret < 0) {
3798 err = ret;
3799 break;
3800 }
3801
3802 if (!RB_EMPTY_ROOT(&blocks)) {
3803 ret = relocate_tree_blocks(trans, rc, &blocks);
3804 if (ret < 0) {
3805 if (ret != -EAGAIN) {
3806 err = ret;
3807 break;
3808 }
3809 rc->extents_found--;
3810 rc->search_start = key.objectid;
3811 }
3812 }
3813
3814 ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
3815 if (ret < 0) {
3816 if (ret != -ENOSPC) {
3817 err = ret;
3818 WARN_ON(1);
3819 break;
3820 }
3821 rc->commit_transaction = 1;
3822 }
3823
3824 if (rc->commit_transaction) {
3825 rc->commit_transaction = 0;
3826 ret = btrfs_commit_transaction(trans, rc->extent_root);
3827 BUG_ON(ret);
3828 } else {
3829 nr = trans->blocks_used;
3830 btrfs_end_transaction_throttle(trans, rc->extent_root);
3831 btrfs_btree_balance_dirty(rc->extent_root, nr);
3832 }
3833 trans = NULL;
3834
3835 if (rc->stage == MOVE_DATA_EXTENTS &&
3836 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3837 rc->found_file_extent = 1;
3838 ret = relocate_data_extent(rc->data_inode,
3839 &key, &rc->cluster);
3840 if (ret < 0) {
3841 err = ret;
3842 break;
3843 }
3844 }
3845 }
3846 if (trans && progress && err == -ENOSPC) {
3847 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
3848 rc->block_group->flags);
3849 if (ret == 0) {
3850 err = 0;
3851 progress = 0;
3852 goto restart;
3853 }
3854 }
3855
3856 btrfs_release_path(path);
3857 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3858 GFP_NOFS);
3859
3860 if (trans) {
3861 nr = trans->blocks_used;
3862 btrfs_end_transaction_throttle(trans, rc->extent_root);
3863 btrfs_btree_balance_dirty(rc->extent_root, nr);
3864 }
3865
3866 if (!err) {
3867 ret = relocate_file_extent_cluster(rc->data_inode,
3868 &rc->cluster);
3869 if (ret < 0)
3870 err = ret;
3871 }
3872
3873 rc->create_reloc_tree = 0;
3874 set_reloc_control(rc);
3875
3876 backref_cache_cleanup(&rc->backref_cache);
3877 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3878
3879 err = prepare_to_merge(rc, err);
3880
3881 merge_reloc_roots(rc);
3882
3883 rc->merge_reloc_tree = 0;
3884 unset_reloc_control(rc);
3885 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3886
3887 /* get rid of pinned extents */
3888 trans = btrfs_join_transaction(rc->extent_root);
3889 if (IS_ERR(trans))
3890 err = PTR_ERR(trans);
3891 else
3892 btrfs_commit_transaction(trans, rc->extent_root);
3893out_free:
3894 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
3895 btrfs_free_path(path);
3896 return err;
3897}
3898
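/*
 * insert an inode item for the data relocation inode.  the item is
 * created with link count 0 and the NOCOMPRESS/PREALLOC flags set.
 */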
3899static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3900 struct btrfs_root *root, u64 objectid)
3901{
3902 struct btrfs_path *path;
3903 struct btrfs_inode_item *item;
3904 struct extent_buffer *leaf;
3905 int ret;
3906
3907 path = btrfs_alloc_path();
3908 if (!path)
3909 return -ENOMEM;
3910
3911 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3912 if (ret)
3913 goto out;
3914
3915 leaf = path->nodes[0];
3916 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3917 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
3918 btrfs_set_inode_generation(leaf, item, 1);
3919 btrfs_set_inode_size(leaf, item, 0);
3920 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3921 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3922 BTRFS_INODE_PREALLOC);
3923 btrfs_mark_buffer_dirty(leaf);
3924 btrfs_release_path(path);
3925out:
3926 btrfs_free_path(path);
3927 return ret;
3928}
3929
3930/*
3931 * helper to create inode for data relocation.
3932 * the inode is in the data relocation tree and its link count is 0
3933 */
3934static noinline_for_stack
3935struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3936 struct btrfs_block_group_cache *group)
3937{
3938 struct inode *inode = NULL;
3939 struct btrfs_trans_handle *trans;
3940 struct btrfs_root *root;
3941 struct btrfs_key key;
3942 unsigned long nr;
3943 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
3944 int err = 0;
3945
3946 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
3947 if (IS_ERR(root))
3948 return ERR_CAST(root);
3949
3950 trans = btrfs_start_transaction(root, 6);
3951 if (IS_ERR(trans))
3952 return ERR_CAST(trans);
3953
3954 err = btrfs_find_free_objectid(root, &objectid);
3955 if (err)
3956 goto out;
3957
3958 err = __insert_orphan_inode(trans, root, objectid);
3959 BUG_ON(err);
3960
3961 key.objectid = objectid;
3962 key.type = BTRFS_INODE_ITEM_KEY;
3963 key.offset = 0;
3964 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
3965 BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
3966 BTRFS_I(inode)->index_cnt = group->key.objectid;
3967
3968 err = btrfs_orphan_add(trans, inode);
3969out:
3970 nr = trans->blocks_used;
3971 btrfs_end_transaction(trans, root);
3972 btrfs_btree_balance_dirty(root, nr);
3973 if (err) {
3974 if (inode)
3975 iput(inode);
3976 inode = ERR_PTR(err);
3977 }
3978 return inode;
3979}
3980
3981static struct reloc_control *alloc_reloc_control(void)
3982{
3983 struct reloc_control *rc;
3984
3985 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3986 if (!rc)
3987 return NULL;
3988
3989 INIT_LIST_HEAD(&rc->reloc_roots);
3990 backref_cache_init(&rc->backref_cache);
3991 mapping_tree_init(&rc->reloc_root_tree);
3992 extent_io_tree_init(&rc->processed_blocks, NULL);
3993 return rc;
3994}
3995
3996/*
3997 * function to relocate all extents in a block group.
3998 */
3999int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
4000{
4001 struct btrfs_fs_info *fs_info = extent_root->fs_info;
4002 struct reloc_control *rc;
4003 struct inode *inode;
4004 struct btrfs_path *path;
4005 int ret;
4006 int rw = 0;
4007 int err = 0;
4008
4009 rc = alloc_reloc_control();
4010 if (!rc)
4011 return -ENOMEM;
4012
4013 rc->extent_root = extent_root;
4014
4015 rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
4016 BUG_ON(!rc->block_group);
4017
4018 if (!rc->block_group->ro) {
4019 ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
4020 if (ret) {
4021 err = ret;
4022 goto out;
4023 }
4024 rw = 1;
4025 }
4026
4027 path = btrfs_alloc_path();
4028 if (!path) {
4029 err = -ENOMEM;
4030 goto out;
4031 }
4032
4033 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
4034 path);
4035 btrfs_free_path(path);
4036
4037 if (!IS_ERR(inode))
4038 ret = delete_block_group_cache(fs_info, inode, 0);
4039 else
4040 ret = PTR_ERR(inode);
4041
4042 if (ret && ret != -ENOENT) {
4043 err = ret;
4044 goto out;
4045 }
4046
4047 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4048 if (IS_ERR(rc->data_inode)) {
4049 err = PTR_ERR(rc->data_inode);
4050 rc->data_inode = NULL;
4051 goto out;
4052 }
4053
4054 printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n",
4055 (unsigned long long)rc->block_group->key.objectid,
4056 (unsigned long long)rc->block_group->flags);
4057
4058 btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
4059 btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
4060
4061 while (1) {
4062 mutex_lock(&fs_info->cleaner_mutex);
4063
4064 btrfs_clean_old_snapshots(fs_info->tree_root);
4065 ret = relocate_block_group(rc);
4066
4067 mutex_unlock(&fs_info->cleaner_mutex);
4068 if (ret < 0) {
4069 err = ret;
4070 goto out;
4071 }
4072
4073 if (rc->extents_found == 0)
4074 break;
4075
4076 printk(KERN_INFO "btrfs: found %llu extents\n",
4077 (unsigned long long)rc->extents_found);
4078
4079 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4080 btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
4081 invalidate_mapping_pages(rc->data_inode->i_mapping,
4082 0, -1);
4083 rc->stage = UPDATE_DATA_PTRS;
4084 }
4085 }
4086
4087 filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
4088 rc->block_group->key.objectid,
4089 rc->block_group->key.objectid +
4090 rc->block_group->key.offset - 1);
4091
4092 WARN_ON(rc->block_group->pinned > 0);
4093 WARN_ON(rc->block_group->reserved > 0);
4094 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
4095out:
4096 if (err && rw)
4097 btrfs_set_block_group_rw(extent_root, rc->block_group);
4098 iput(rc->data_inode);
4099 btrfs_put_block_group(rc->block_group);
4100 kfree(rc);
4101 return err;
4102}
4103
4104static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4105{
4106 struct btrfs_trans_handle *trans;
4107 int ret, err;
4108
4109 trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
4110 if (IS_ERR(trans))
4111 return PTR_ERR(trans);
4112
4113 memset(&root->root_item.drop_progress, 0,
4114 sizeof(root->root_item.drop_progress));
4115 root->root_item.drop_level = 0;
4116 btrfs_set_root_refs(&root->root_item, 0);
4117 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4118 &root->root_key, &root->root_item);
4119
4120 err = btrfs_end_transaction(trans, root->fs_info->tree_root);
4121 if (err)
4122 return err;
4123 return ret;
4124}
4125
4126/*
4127 * recover relocation interrupted by a system crash.
4128 *
4129 * this function resumes merging reloc trees with the corresponding fs
4130 * trees. this is important for keeping tree blocks shared.
4131 */
4132int btrfs_recover_relocation(struct btrfs_root *root)
4133{
4134 LIST_HEAD(reloc_roots);
4135 struct btrfs_key key;
4136 struct btrfs_root *fs_root;
4137 struct btrfs_root *reloc_root;
4138 struct btrfs_path *path;
4139 struct extent_buffer *leaf;
4140 struct reloc_control *rc = NULL;
4141 struct btrfs_trans_handle *trans;
4142 int ret;
4143 int err = 0;
4144
4145 path = btrfs_alloc_path();
4146 if (!path)
4147 return -ENOMEM;
4148 path->reada = -1;
4149
4150 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4151 key.type = BTRFS_ROOT_ITEM_KEY;
4152 key.offset = (u64)-1;
4153
4154 while (1) {
4155 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
4156 path, 0, 0);
4157 if (ret < 0) {
4158 err = ret;
4159 goto out;
4160 }
4161 if (ret > 0) {
4162 if (path->slots[0] == 0)
4163 break;
4164 path->slots[0]--;
4165 }
4166 leaf = path->nodes[0];
4167 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4168 btrfs_release_path(path);
4169
4170 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4171 key.type != BTRFS_ROOT_ITEM_KEY)
4172 break;
4173
4174 reloc_root = btrfs_read_fs_root_no_radix(root, &key);
4175 if (IS_ERR(reloc_root)) {
4176 err = PTR_ERR(reloc_root);
4177 goto out;
4178 }
4179
4180 list_add(&reloc_root->root_list, &reloc_roots);
4181
4182 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4183 fs_root = read_fs_root(root->fs_info,
4184 reloc_root->root_key.offset);
4185 if (IS_ERR(fs_root)) {
4186 ret = PTR_ERR(fs_root);
4187 if (ret != -ENOENT) {
4188 err = ret;
4189 goto out;
4190 }
4191 ret = mark_garbage_root(reloc_root);
4192 if (ret < 0) {
4193 err = ret;
4194 goto out;
4195 }
4196 }
4197 }
4198
4199 if (key.offset == 0)
4200 break;
4201
4202 key.offset--;
4203 }
4204 btrfs_release_path(path);
4205
4206 if (list_empty(&reloc_roots))
4207 goto out;
4208
4209 rc = alloc_reloc_control();
4210 if (!rc) {
4211 err = -ENOMEM;
4212 goto out;
4213 }
4214
4215 rc->extent_root = root->fs_info->extent_root;
4216
4217 set_reloc_control(rc);
4218
4219 trans = btrfs_join_transaction(rc->extent_root);
4220 if (IS_ERR(trans)) {
4221 unset_reloc_control(rc);
4222 err = PTR_ERR(trans);
4223 goto out_free;
4224 }
4225
4226 rc->merge_reloc_tree = 1;
4227
4228 while (!list_empty(&reloc_roots)) {
4229 reloc_root = list_entry(reloc_roots.next,
4230 struct btrfs_root, root_list);
4231 list_del(&reloc_root->root_list);
4232
4233 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4234 list_add_tail(&reloc_root->root_list,
4235 &rc->reloc_roots);
4236 continue;
4237 }
4238
4239 fs_root = read_fs_root(root->fs_info,
4240 reloc_root->root_key.offset);
4241 if (IS_ERR(fs_root)) {
4242 err = PTR_ERR(fs_root);
4243 goto out_free;
4244 }
4245
4246 err = __add_reloc_root(reloc_root);
4247 BUG_ON(err < 0); /* -ENOMEM or logic error */
4248 fs_root->reloc_root = reloc_root;
4249 }
4250
4251 err = btrfs_commit_transaction(trans, rc->extent_root);
4252 if (err)
4253 goto out_free;
4254
4255 merge_reloc_roots(rc);
4256
4257 unset_reloc_control(rc);
4258
4259 trans = btrfs_join_transaction(rc->extent_root);
4260 if (IS_ERR(trans))
4261 err = PTR_ERR(trans);
4262 else
4263 err = btrfs_commit_transaction(trans, rc->extent_root);
4264out_free:
4265 kfree(rc);
4266out:
4267 while (!list_empty(&reloc_roots)) {
4268 reloc_root = list_entry(reloc_roots.next,
4269 struct btrfs_root, root_list);
4270 list_del(&reloc_root->root_list);
4271 free_extent_buffer(reloc_root->node);
4272 free_extent_buffer(reloc_root->commit_root);
4273 kfree(reloc_root);
4274 }
4275 btrfs_free_path(path);
4276
4277 if (err == 0) {
4278 /* cleanup orphan inode in data relocation tree */
4279 fs_root = read_fs_root(root->fs_info,
4280 BTRFS_DATA_RELOC_TREE_OBJECTID);
4281 if (IS_ERR(fs_root))
4282 err = PTR_ERR(fs_root);
4283 else
4284 err = btrfs_orphan_cleanup(fs_root);
4285 }
4286 return err;
4287}
4288
4289/*
4290 * helper to add ordered checksum for data relocation.
4291 *
4292 * cloning checksums properly handles nodatasum extents.
4293 * it also saves the CPU time of re-calculating the checksums.
4294 */
4295int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4296{
4297 struct btrfs_ordered_sum *sums;
4298 struct btrfs_sector_sum *sector_sum;
4299 struct btrfs_ordered_extent *ordered;
4300 struct btrfs_root *root = BTRFS_I(inode)->root;
4301 size_t offset;
4302 int ret;
4303 u64 disk_bytenr;
4304 LIST_HEAD(list);
4305
4306 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4307 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
4308
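	/*
	 * File offsets in the data reloc inode track the original extent
	 * bytenrs shifted down by index_cnt, so the old disk bytenr of the
	 * data we are cloning csums for is file_pos + index_cnt.
	 */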
4309 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4310 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
4311 disk_bytenr + len - 1, &list, 0);
4312 if (ret)
4313 goto out;
4314
4315 while (!list_empty(&list)) {
4316 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4317 list_del_init(&sums->list);
4318
4319 sector_sum = sums->sums;
4320 sums->bytenr = ordered->start;
4321
4322 offset = 0;
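		/*
		 * Rebase each per-sector csum bytenr from the old disk
		 * location onto the newly allocated ordered extent.
		 */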
4323 while (offset < sums->len) {
4324 sector_sum->bytenr += ordered->start - disk_bytenr;
4325 sector_sum++;
4326 offset += root->sectorsize;
4327 }
4328
4329 btrfs_add_ordered_sum(inode, ordered, sums);
4330 }
4331out:
4332 btrfs_put_ordered_extent(ordered);
4333 return ret;
4334}
4335
4336void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4337 struct btrfs_root *root, struct extent_buffer *buf,
4338 struct extent_buffer *cow)
4339{
4340 struct reloc_control *rc;
4341 struct backref_node *node;
4342 int first_cow = 0;
4343 int level;
4344 int ret;
4345
4346 rc = root->fs_info->reloc_ctl;
4347 if (!rc)
4348 return;
4349
4350 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4351 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4352
4353 level = btrfs_header_level(buf);
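	/*
	 * The block's generation is not newer than the root's last
	 * snapshot, so this is the first COW of the block since that
	 * point.
	 */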
4354 if (btrfs_header_generation(buf) <=
4355 btrfs_root_last_snapshot(&root->root_item))
4356 first_cow = 1;
4357
4358 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4359 rc->create_reloc_tree) {
4360 WARN_ON(!first_cow && level == 0);
4361
4362 node = rc->backref_cache.path[level];
4363 BUG_ON(node->bytenr != buf->start &&
4364 node->new_bytenr != buf->start);
4365
4366 drop_node_buffer(node);
4367 extent_buffer_get(cow);
4368 node->eb = cow;
4369 node->new_bytenr = cow->start;
4370
4371 if (!node->pending) {
4372 list_move_tail(&node->list,
4373 &rc->backref_cache.pending[level]);
4374 node->pending = 1;
4375 }
4376
4377 if (first_cow)
4378 __mark_block_processed(rc, node);
4379
4380 if (first_cow && level > 0)
4381 rc->nodes_relocated += buf->len;
4382 }
4383
4384 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
4385 ret = replace_file_extents(trans, rc, root, cow);
4386 BUG_ON(ret);
4387 }
4388}
4389
4390/*
4391 * called before creating snapshot. it calculates metadata reservation
4392 * required for relocating tree blocks in the snapshot
4393 */
4394void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4395 struct btrfs_pending_snapshot *pending,
4396 u64 *bytes_to_reserve)
4397{
4398 struct btrfs_root *root;
4399 struct reloc_control *rc;
4400
4401 root = pending->root;
4402 if (!root->reloc_root)
4403 return;
4404
4405 rc = root->fs_info->reloc_ctl;
4406 if (!rc->merge_reloc_tree)
4407 return;
4408
4409 root = root->reloc_root;
4410 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4411 /*
4412 * relocation is in the stage of merging trees. the space
4413 * used by merging a reloc tree is twice the size of
4414 * relocated tree nodes in the worst case. half for cowing
4415 * the reloc tree, half for cowing the fs tree. the space
4416 * used by cowing the reloc tree will be freed after the
4417 * tree is dropped. if we create snapshot, cowing the fs
4418 * tree may use more space than it frees. so we need to
4419 * reserve extra space.
4420 */
4421 *bytes_to_reserve += rc->nodes_relocated;
4422}
4423
4424/*
4425 * called after snapshot is created. migrate block reservation
4426 * and create reloc root for the newly created snapshot
4427 */
4428int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4429 struct btrfs_pending_snapshot *pending)
4430{
4431 struct btrfs_root *root = pending->root;
4432 struct btrfs_root *reloc_root;
4433 struct btrfs_root *new_root;
4434 struct reloc_control *rc;
4435 int ret;
4436
4437 if (!root->reloc_root)
4438 return 0;
4439
4440 rc = root->fs_info->reloc_ctl;
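	/*
	 * The new snapshot shares the relocated tree blocks with its source,
	 * so merging the reloc trees needs the extra reservation that
	 * btrfs_reloc_pre_snapshot asked the snapshot creator to set aside.
	 */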
4441 rc->merging_rsv_size += rc->nodes_relocated;
4442
4443 if (rc->merge_reloc_tree) {
4444 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4445 rc->block_rsv,
4446 rc->nodes_relocated);
4447 if (ret)
4448 return ret;
4449 }
4450
4451 new_root = pending->snap;
4452 reloc_root = create_reloc_root(trans, root->reloc_root,
4453 new_root->root_key.objectid);
4454 if (IS_ERR(reloc_root))
4455 return PTR_ERR(reloc_root);
4456
4457 ret = __add_reloc_root(reloc_root);
4458 BUG_ON(ret < 0);
4459 new_root->reloc_root = reloc_root;
4460
4461 if (rc->create_reloc_tree)
4462 ret = clone_backref_node(trans, rc, root, reloc_root);
4463 return ret;
4464}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/pagemap.h>
8#include <linux/writeback.h>
9#include <linux/blkdev.h>
10#include <linux/rbtree.h>
11#include <linux/slab.h>
12#include <linux/error-injection.h>
13#include "ctree.h"
14#include "disk-io.h"
15#include "transaction.h"
16#include "volumes.h"
17#include "locking.h"
18#include "btrfs_inode.h"
19#include "async-thread.h"
20#include "free-space-cache.h"
21#include "qgroup.h"
22#include "print-tree.h"
23#include "delalloc-space.h"
24#include "block-group.h"
25#include "backref.h"
26#include "misc.h"
27#include "subpage.h"
28#include "zoned.h"
29#include "inode-item.h"
30#include "space-info.h"
31#include "fs.h"
32#include "accessors.h"
33#include "extent-tree.h"
34#include "root-tree.h"
35#include "file-item.h"
36#include "relocation.h"
37#include "super.h"
38#include "tree-checker.h"
39
40/*
41 * Relocation overview
42 *
43 * [What does relocation do]
44 *
45 * The objective of relocation is to relocate all extents of the target block
46 * group to other block groups.
47 * This is utilized by resize (shrink only), profile converting, compacting
48 * space, or balance routine to spread chunks over devices.
49 *
50 * Before | After
51 * ------------------------------------------------------------------
52 * BG A: 10 data extents | BG A: deleted
53 * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
54 * BG C: 1 extent           | BG C: 3 data extents (1 old + 2 relocated)
55 *
56 * [How does relocation work]
57 *
58 * 1. Mark the target block group read-only
59 * New extents won't be allocated from the target block group.
60 *
61 * 2.1 Record each extent in the target block group
62 * To build a proper map of extents to be relocated.
63 *
64 * 2.2 Build data reloc tree and reloc trees
65 * Data reloc tree will contain an inode, recording all newly relocated
66 * data extents.
67 * There will be only one data reloc tree for one data block group.
68 *
69 * Reloc tree will be a special snapshot of its source tree, containing
70 * relocated tree blocks.
71 * Each tree referring to a tree block in target block group will get its
72 * reloc tree built.
73 *
74 * 2.3 Swap source tree with its corresponding reloc tree
75 * Each involved tree only refers to new extents after swap.
76 *
77 * 3. Cleanup reloc trees and data reloc tree.
78 * As old extents in the target block group are still referenced by reloc
79 * trees, we need to clean them up before really freeing the target block
80 * group.
81 *
82 * The main complexity is in steps 2.2 and 2.3.
83 *
84 * The entry point of relocation is relocate_block_group() function.
85 */
86
87#define RELOCATION_RESERVED_NODES 256
88/*
89 * map address of tree root to tree
90 */
91struct mapping_node {
92 struct {
93 struct rb_node rb_node;
94 u64 bytenr;
95 }; /* Use rb_simple_node for search/insert */
96 void *data;
97};
98
99struct mapping_tree {
100 struct rb_root rb_root;
101 spinlock_t lock;
102};
103
104/*
105 * present a tree block to process
106 */
107struct tree_block {
108 struct {
109 struct rb_node rb_node;
110 u64 bytenr;
111 }; /* Use rb_simple_node for search/insert */
112 u64 owner;
113 struct btrfs_key key;
114 unsigned int level:8;
115 unsigned int key_ready:1;
116};
117
118#define MAX_EXTENTS 128
119
120struct file_extent_cluster {
121 u64 start;
122 u64 end;
123 u64 boundary[MAX_EXTENTS];
124 unsigned int nr;
125};
126
127struct reloc_control {
128 /* block group to relocate */
129 struct btrfs_block_group *block_group;
130 /* extent tree */
131 struct btrfs_root *extent_root;
132 /* inode for moving data */
133 struct inode *data_inode;
134
135 struct btrfs_block_rsv *block_rsv;
136
137 struct btrfs_backref_cache backref_cache;
138
139 struct file_extent_cluster cluster;
140 /* tree blocks have been processed */
141 struct extent_io_tree processed_blocks;
142 /* map start of tree root to corresponding reloc tree */
143 struct mapping_tree reloc_root_tree;
144 /* list of reloc trees */
145 struct list_head reloc_roots;
146 /* list of subvolume trees that get relocated */
147 struct list_head dirty_subvol_roots;
148 /* size of metadata reservation for merging reloc trees */
149 u64 merging_rsv_size;
150 /* size of relocated tree nodes */
151 u64 nodes_relocated;
152 /* reserved size for block group relocation*/
153 u64 reserved_bytes;
154
155 u64 search_start;
156 u64 extents_found;
157
158 unsigned int stage:8;
159 unsigned int create_reloc_tree:1;
160 unsigned int merge_reloc_tree:1;
161 unsigned int found_file_extent:1;
162};
163
164/* stages of data relocation */
165#define MOVE_DATA_EXTENTS 0
166#define UPDATE_DATA_PTRS 1
167
168static void mark_block_processed(struct reloc_control *rc,
169 struct btrfs_backref_node *node)
170{
171 u32 blocksize;
172
173 if (node->level == 0 ||
174 in_range(node->bytenr, rc->block_group->start,
175 rc->block_group->length)) {
176 blocksize = rc->extent_root->fs_info->nodesize;
177 set_extent_bits(&rc->processed_blocks, node->bytenr,
178 node->bytenr + blocksize - 1, EXTENT_DIRTY);
179 }
180 node->processed = 1;
181}
182
183
184static void mapping_tree_init(struct mapping_tree *tree)
185{
186 tree->rb_root = RB_ROOT;
187 spin_lock_init(&tree->lock);
188}
189
190/*
191 * walk up backref nodes until we reach the node that presents the tree root
192 */
193static struct btrfs_backref_node *walk_up_backref(
194 struct btrfs_backref_node *node,
195 struct btrfs_backref_edge *edges[], int *index)
196{
197 struct btrfs_backref_edge *edge;
198 int idx = *index;
199
200 while (!list_empty(&node->upper)) {
201 edge = list_entry(node->upper.next,
202 struct btrfs_backref_edge, list[LOWER]);
203 edges[idx++] = edge;
204 node = edge->node[UPPER];
205 }
206 BUG_ON(node->detached);
207 *index = idx;
208 return node;
209}
210
211/*
212 * walk down backref nodes to find start of next reference path
213 */
214static struct btrfs_backref_node *walk_down_backref(
215 struct btrfs_backref_edge *edges[], int *index)
216{
217 struct btrfs_backref_edge *edge;
218 struct btrfs_backref_node *lower;
219 int idx = *index;
220
221 while (idx > 0) {
222 edge = edges[idx - 1];
223 lower = edge->node[LOWER];
224 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
225 idx--;
226 continue;
227 }
228 edge = list_entry(edge->list[LOWER].next,
229 struct btrfs_backref_edge, list[LOWER]);
230 edges[idx - 1] = edge;
231 *index = idx;
232 return edge->node[UPPER];
233 }
234 *index = 0;
235 return NULL;
236}
237
238static void update_backref_node(struct btrfs_backref_cache *cache,
239 struct btrfs_backref_node *node, u64 bytenr)
240{
241 struct rb_node *rb_node;
242 rb_erase(&node->rb_node, &cache->rb_root);
243 node->bytenr = bytenr;
244 rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
245 if (rb_node)
246 btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
247}
248
249/*
250 * update backref cache after a transaction commit
251 */
252static int update_backref_cache(struct btrfs_trans_handle *trans,
253 struct btrfs_backref_cache *cache)
254{
255 struct btrfs_backref_node *node;
256 int level = 0;
257
258 if (cache->last_trans == 0) {
259 cache->last_trans = trans->transid;
260 return 0;
261 }
262
263 if (cache->last_trans == trans->transid)
264 return 0;
265
266 /*
267 * detached nodes are used to avoid unnecessary backref
268 * lookup. transaction commit changes the extent tree.
269 * so the detached nodes are no longer useful.
270 */
271 while (!list_empty(&cache->detached)) {
272 node = list_entry(cache->detached.next,
273 struct btrfs_backref_node, list);
274 btrfs_backref_cleanup_node(cache, node);
275 }
276
277 while (!list_empty(&cache->changed)) {
278 node = list_entry(cache->changed.next,
279 struct btrfs_backref_node, list);
280 list_del_init(&node->list);
281 BUG_ON(node->pending);
282 update_backref_node(cache, node, node->new_bytenr);
283 }
284
285 /*
286 * some nodes can be left in the pending list if there were
287 * errors while processing the pending nodes.
288 */
289 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
290 list_for_each_entry(node, &cache->pending[level], list) {
291 BUG_ON(!node->pending);
292 if (node->bytenr == node->new_bytenr)
293 continue;
294 update_backref_node(cache, node, node->new_bytenr);
295 }
296 }
297
298 cache->last_trans = 0;
299 return 1;
300}
301
302static bool reloc_root_is_dead(struct btrfs_root *root)
303{
304 /*
305 * Pair with set_bit/clear_bit in clean_dirty_subvols and
306 * btrfs_update_reloc_root. We need to see the updated bit before
307 * trying to access reloc_root
308 */
309 smp_rmb();
310 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
311 return true;
312 return false;
313}
314
315/*
316 * Check if this subvolume tree has valid reloc tree.
317 *
318 * Reloc tree after swap is considered dead, thus not considered as valid.
319 * This is enough for most callers, as they don't distinguish dead reloc root
320 * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
321 * special case.
322 */
323static bool have_reloc_root(struct btrfs_root *root)
324{
325 if (reloc_root_is_dead(root))
326 return false;
327 if (!root->reloc_root)
328 return false;
329 return true;
330}
331
332int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
333{
334 struct btrfs_root *reloc_root;
335
336 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
337 return 0;
338
339 /* This root has been merged with its reloc tree, we can ignore it */
340 if (reloc_root_is_dead(root))
341 return 1;
342
343 reloc_root = root->reloc_root;
344 if (!reloc_root)
345 return 0;
346
347 if (btrfs_header_generation(reloc_root->commit_root) ==
348 root->fs_info->running_transaction->transid)
349 return 0;
350 /*
351 * if there is a reloc tree and it was created in a previous
352 * transaction, backref lookup can find the reloc tree,
353 * so the backref node for the fs tree root is useless for
354 * relocation.
355 */
356 return 1;
357}
358
359/*
360 * find reloc tree by address of tree root
361 */
362struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
363{
364 struct reloc_control *rc = fs_info->reloc_ctl;
365 struct rb_node *rb_node;
366 struct mapping_node *node;
367 struct btrfs_root *root = NULL;
368
369 ASSERT(rc);
370 spin_lock(&rc->reloc_root_tree.lock);
371 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
372 if (rb_node) {
373 node = rb_entry(rb_node, struct mapping_node, rb_node);
374 root = node->data;
375 }
376 spin_unlock(&rc->reloc_root_tree.lock);
377 return btrfs_grab_root(root);
378}
379
380/*
381 * For useless nodes, do two major clean ups:
382 *
383 * - Cleanup the children edges and nodes
384 * If child node is also orphan (no parent) during cleanup, then the child
385 * node will also be cleaned up.
386 *
387 * - Freeing up leaves (level 0), keeps nodes detached
388 * For nodes, the node is still cached as "detached"
389 *
390 * Return false if @node is not in the @useless_nodes list.
391 * Return true if @node is in the @useless_nodes list.
392 */
393static bool handle_useless_nodes(struct reloc_control *rc,
394 struct btrfs_backref_node *node)
395{
396 struct btrfs_backref_cache *cache = &rc->backref_cache;
397 struct list_head *useless_node = &cache->useless_node;
398 bool ret = false;
399
400 while (!list_empty(useless_node)) {
401 struct btrfs_backref_node *cur;
402
403 cur = list_first_entry(useless_node, struct btrfs_backref_node,
404 list);
405 list_del_init(&cur->list);
406
407 /* Only tree root nodes can be added to @useless_nodes */
408 ASSERT(list_empty(&cur->upper));
409
410 if (cur == node)
411 ret = true;
412
413 /* The node is the lowest node */
414 if (cur->lowest) {
415 list_del_init(&cur->lower);
416 cur->lowest = 0;
417 }
418
419 /* Cleanup the lower edges */
420 while (!list_empty(&cur->lower)) {
421 struct btrfs_backref_edge *edge;
422 struct btrfs_backref_node *lower;
423
424 edge = list_entry(cur->lower.next,
425 struct btrfs_backref_edge, list[UPPER]);
426 list_del(&edge->list[UPPER]);
427 list_del(&edge->list[LOWER]);
428 lower = edge->node[LOWER];
429 btrfs_backref_free_edge(cache, edge);
430
431 /* Child node is also orphan, queue for cleanup */
432 if (list_empty(&lower->upper))
433 list_add(&lower->list, useless_node);
434 }
435 /* Mark this block processed for relocation */
436 mark_block_processed(rc, cur);
437
438 /*
439 * Backref nodes for tree leaves are deleted from the cache.
440 * Backref nodes for upper level tree blocks are left in the
441 * cache to avoid unnecessary backref lookup.
442 */
443 if (cur->level > 0) {
444 list_add(&cur->list, &cache->detached);
445 cur->detached = 1;
446 } else {
447 rb_erase(&cur->rb_node, &cache->rb_root);
448 btrfs_backref_free_node(cache, cur);
449 }
450 }
451 return ret;
452}
453
454/*
455 * Build backref tree for a given tree block. Root of the backref tree
456 * corresponds to the tree block, leaves of the backref tree correspond to roots of
457 * b-trees that reference the tree block.
458 *
459 * The basic idea of this function is to check backrefs of a given block to find
460 * upper level blocks that reference the block, and then check backrefs of
461 * these upper level blocks recursively. The recursion stops when the tree root is
462 * reached or backrefs for the block are cached.
463 *
464 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
465 * all upper level blocks that directly/indirectly reference the block are also
466 * cached.
467 */
468static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
469 struct reloc_control *rc, struct btrfs_key *node_key,
470 int level, u64 bytenr)
471{
472 struct btrfs_backref_iter *iter;
473 struct btrfs_backref_cache *cache = &rc->backref_cache;
474 /* For searching parent of TREE_BLOCK_REF */
475 struct btrfs_path *path;
476 struct btrfs_backref_node *cur;
477 struct btrfs_backref_node *node = NULL;
478 struct btrfs_backref_edge *edge;
479 int ret;
480 int err = 0;
481
482 iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
483 if (!iter)
484 return ERR_PTR(-ENOMEM);
485 path = btrfs_alloc_path();
486 if (!path) {
487 err = -ENOMEM;
488 goto out;
489 }
490
491 node = btrfs_backref_alloc_node(cache, bytenr, level);
492 if (!node) {
493 err = -ENOMEM;
494 goto out;
495 }
496
497 node->lowest = 1;
498 cur = node;
499
500 /* Breadth-first search to build backref cache */
501 do {
502 ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
503 cur);
504 if (ret < 0) {
505 err = ret;
506 goto out;
507 }
508 edge = list_first_entry_or_null(&cache->pending_edge,
509 struct btrfs_backref_edge, list[UPPER]);
510 /*
511 * The pending list isn't empty, take the first block to
512 * process
513 */
514 if (edge) {
515 list_del_init(&edge->list[UPPER]);
516 cur = edge->node[UPPER];
517 }
518 } while (edge);
519
520 /* Finish the upper linkage of newly added edges/nodes */
521 ret = btrfs_backref_finish_upper_links(cache, node);
522 if (ret < 0) {
523 err = ret;
524 goto out;
525 }
526
527 if (handle_useless_nodes(rc, node))
528 node = NULL;
529out:
530 btrfs_backref_iter_free(iter);
531 btrfs_free_path(path);
532 if (err) {
533 btrfs_backref_error_cleanup(cache, node);
534 return ERR_PTR(err);
535 }
536 ASSERT(!node || !node->detached);
537 ASSERT(list_empty(&cache->useless_node) &&
538 list_empty(&cache->pending_edge));
539 return node;
540}
541
542/*
543 * helper to add backref node for the newly created snapshot.
544 * the backref node is created by cloning backref node that
545 * corresponds to root of source tree
546 */
547static int clone_backref_node(struct btrfs_trans_handle *trans,
548 struct reloc_control *rc,
549 struct btrfs_root *src,
550 struct btrfs_root *dest)
551{
552 struct btrfs_root *reloc_root = src->reloc_root;
553 struct btrfs_backref_cache *cache = &rc->backref_cache;
554 struct btrfs_backref_node *node = NULL;
555 struct btrfs_backref_node *new_node;
556 struct btrfs_backref_edge *edge;
557 struct btrfs_backref_edge *new_edge;
558 struct rb_node *rb_node;
559
560 if (cache->last_trans > 0)
561 update_backref_cache(trans, cache);
562
563 rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
564 if (rb_node) {
565 node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
566 if (node->detached)
567 node = NULL;
568 else
569 BUG_ON(node->new_bytenr != reloc_root->node->start);
570 }
571
572 if (!node) {
573 rb_node = rb_simple_search(&cache->rb_root,
574 reloc_root->commit_root->start);
575 if (rb_node) {
576 node = rb_entry(rb_node, struct btrfs_backref_node,
577 rb_node);
578 BUG_ON(node->detached);
579 }
580 }
581
582 if (!node)
583 return 0;
584
585 new_node = btrfs_backref_alloc_node(cache, dest->node->start,
586 node->level);
587 if (!new_node)
588 return -ENOMEM;
589
590 new_node->lowest = node->lowest;
591 new_node->checked = 1;
592 new_node->root = btrfs_grab_root(dest);
593 ASSERT(new_node->root);
594
595 if (!node->lowest) {
596 list_for_each_entry(edge, &node->lower, list[UPPER]) {
597 new_edge = btrfs_backref_alloc_edge(cache);
598 if (!new_edge)
599 goto fail;
600
601 btrfs_backref_link_edge(new_edge, edge->node[LOWER],
602 new_node, LINK_UPPER);
603 }
604 } else {
605 list_add_tail(&new_node->lower, &cache->leaves);
606 }
607
608 rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
609 &new_node->rb_node);
610 if (rb_node)
611 btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
612
613 if (!new_node->lowest) {
614 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
615 list_add_tail(&new_edge->list[LOWER],
616 &new_edge->node[LOWER]->upper);
617 }
618 }
619 return 0;
620fail:
621 while (!list_empty(&new_node->lower)) {
622 new_edge = list_entry(new_node->lower.next,
623 struct btrfs_backref_edge, list[UPPER]);
624 list_del(&new_edge->list[UPPER]);
625 btrfs_backref_free_edge(cache, new_edge);
626 }
627 btrfs_backref_free_node(cache, new_node);
628 return -ENOMEM;
629}
630
631/*
632 * helper to add 'address of tree root -> reloc tree' mapping
633 */
634static int __must_check __add_reloc_root(struct btrfs_root *root)
635{
636 struct btrfs_fs_info *fs_info = root->fs_info;
637 struct rb_node *rb_node;
638 struct mapping_node *node;
639 struct reloc_control *rc = fs_info->reloc_ctl;
640
641 node = kmalloc(sizeof(*node), GFP_NOFS);
642 if (!node)
643 return -ENOMEM;
644
645 node->bytenr = root->commit_root->start;
646 node->data = root;
647
648 spin_lock(&rc->reloc_root_tree.lock);
649 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
650 node->bytenr, &node->rb_node);
651 spin_unlock(&rc->reloc_root_tree.lock);
652 if (rb_node) {
653 btrfs_err(fs_info,
654 "Duplicate root found for start=%llu while inserting into relocation tree",
655 node->bytenr);
656 return -EEXIST;
657 }
658
659 list_add_tail(&root->root_list, &rc->reloc_roots);
660 return 0;
661}
662
663/*
664 * helper to delete the 'address of tree root -> reloc tree'
665 * mapping
666 */
667static void __del_reloc_root(struct btrfs_root *root)
668{
669 struct btrfs_fs_info *fs_info = root->fs_info;
670 struct rb_node *rb_node;
671 struct mapping_node *node = NULL;
672 struct reloc_control *rc = fs_info->reloc_ctl;
673 bool put_ref = false;
674
675 if (rc && root->node) {
676 spin_lock(&rc->reloc_root_tree.lock);
677 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
678 root->commit_root->start);
679 if (rb_node) {
680 node = rb_entry(rb_node, struct mapping_node, rb_node);
681 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
682 RB_CLEAR_NODE(&node->rb_node);
683 }
684 spin_unlock(&rc->reloc_root_tree.lock);
685 ASSERT(!node || (struct btrfs_root *)node->data == root);
686 }
687
688 /*
689 * We only put the reloc root here if it's on the list. There are a lot
690 * of places where the pattern is to splice the rc->reloc_roots, process
691 * the reloc roots, and then add the reloc root back onto
692 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
693 * list we don't want the reference being dropped, because the guy
694 * messing with the list is in charge of the reference.
695 */
696 spin_lock(&fs_info->trans_lock);
697 if (!list_empty(&root->root_list)) {
698 put_ref = true;
699 list_del_init(&root->root_list);
700 }
701 spin_unlock(&fs_info->trans_lock);
702 if (put_ref)
703 btrfs_put_root(root);
704 kfree(node);
705}
706
707/*
708 * helper to update the 'address of tree root -> reloc tree'
709 * mapping
710 */
711static int __update_reloc_root(struct btrfs_root *root)
712{
713 struct btrfs_fs_info *fs_info = root->fs_info;
714 struct rb_node *rb_node;
715 struct mapping_node *node = NULL;
716 struct reloc_control *rc = fs_info->reloc_ctl;
717
718 spin_lock(&rc->reloc_root_tree.lock);
719 rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
720 root->commit_root->start);
721 if (rb_node) {
722 node = rb_entry(rb_node, struct mapping_node, rb_node);
723 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
724 }
725 spin_unlock(&rc->reloc_root_tree.lock);
726
727 if (!node)
728 return 0;
729 BUG_ON((struct btrfs_root *)node->data != root);
730
731 spin_lock(&rc->reloc_root_tree.lock);
732 node->bytenr = root->node->start;
733 rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
734 node->bytenr, &node->rb_node);
735 spin_unlock(&rc->reloc_root_tree.lock);
736 if (rb_node)
737 btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
738 return 0;
739}
740
741static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
742 struct btrfs_root *root, u64 objectid)
743{
744 struct btrfs_fs_info *fs_info = root->fs_info;
745 struct btrfs_root *reloc_root;
746 struct extent_buffer *eb;
747 struct btrfs_root_item *root_item;
748 struct btrfs_key root_key;
749 int ret = 0;
750 bool must_abort = false;
751
752 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
753 if (!root_item)
754 return ERR_PTR(-ENOMEM);
755
756 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
757 root_key.type = BTRFS_ROOT_ITEM_KEY;
758 root_key.offset = objectid;
759
760 if (root->root_key.objectid == objectid) {
761 u64 commit_root_gen;
762
763 /* called by btrfs_init_reloc_root */
764 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
765 BTRFS_TREE_RELOC_OBJECTID);
766 if (ret)
767 goto fail;
768
769 /*
770 * Set the last_snapshot field to the generation of the commit
771 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
772 * correctly (returns true) when the relocation root is created
773 * either inside the critical section of a transaction commit
775 * (through transaction.c:qgroup_account_snapshot()) or when
775 * it's created before the transaction commit is started.
776 */
777 commit_root_gen = btrfs_header_generation(root->commit_root);
778 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
779 } else {
780 /*
781 * called by btrfs_reloc_post_snapshot_hook.
782 * the source tree is a reloc tree, all tree blocks
783 * modified after it was created have RELOC flag
784 * set in their headers. so it's OK to not update
785 * the 'last_snapshot'.
786 */
787 ret = btrfs_copy_root(trans, root, root->node, &eb,
788 BTRFS_TREE_RELOC_OBJECTID);
789 if (ret)
790 goto fail;
791 }
792
793 /*
794 * We have changed references at this point, we must abort the
795 * transaction if anything fails.
796 */
797 must_abort = true;
798
799 memcpy(root_item, &root->root_item, sizeof(*root_item));
800 btrfs_set_root_bytenr(root_item, eb->start);
801 btrfs_set_root_level(root_item, btrfs_header_level(eb));
802 btrfs_set_root_generation(root_item, trans->transid);
803
804 if (root->root_key.objectid == objectid) {
805 btrfs_set_root_refs(root_item, 0);
806 memset(&root_item->drop_progress, 0,
807 sizeof(struct btrfs_disk_key));
808 btrfs_set_root_drop_level(root_item, 0);
809 }
810
811 btrfs_tree_unlock(eb);
812 free_extent_buffer(eb);
813
814 ret = btrfs_insert_root(trans, fs_info->tree_root,
815 &root_key, root_item);
816 if (ret)
817 goto fail;
818
819 kfree(root_item);
820
821 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
822 if (IS_ERR(reloc_root)) {
823 ret = PTR_ERR(reloc_root);
824 goto abort;
825 }
826 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
827 reloc_root->last_trans = trans->transid;
828 return reloc_root;
829fail:
830 kfree(root_item);
831abort:
832 if (must_abort)
833 btrfs_abort_transaction(trans, ret);
834 return ERR_PTR(ret);
835}
836
837/*
838 * create reloc tree for a given fs tree. reloc tree is just a
839 * snapshot of the fs tree with special root objectid.
840 *
841 * The reloc_root comes out of here with two references, one for
842 * root->reloc_root, and another for being on the rc->reloc_roots list.
843 */
844int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
845 struct btrfs_root *root)
846{
847 struct btrfs_fs_info *fs_info = root->fs_info;
848 struct btrfs_root *reloc_root;
849 struct reloc_control *rc = fs_info->reloc_ctl;
850 struct btrfs_block_rsv *rsv;
851 int clear_rsv = 0;
852 int ret;
853
854 if (!rc)
855 return 0;
856
857 /*
858 * The subvolume has reloc tree but the swap is finished, no need to
859 * create/update the dead reloc tree
860 */
861 if (reloc_root_is_dead(root))
862 return 0;
863
864 /*
865 * This is subtle but important. We do not do
866 * record_root_in_transaction for reloc roots, instead we record their
867 * corresponding fs root, and then here we update the last trans for the
868 * reloc root. This means that we have to do this for the entire life
869 * of the reloc root, regardless of which stage of the relocation we are
870 * in.
871 */
872 if (root->reloc_root) {
873 reloc_root = root->reloc_root;
874 reloc_root->last_trans = trans->transid;
875 return 0;
876 }
877
878 /*
879 * We are merging reloc roots, we do not need new reloc trees. Also
880 * reloc trees never need their own reloc tree.
881 */
882 if (!rc->create_reloc_tree ||
883 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
884 return 0;
885
886 if (!trans->reloc_reserved) {
887 rsv = trans->block_rsv;
888 trans->block_rsv = rc->block_rsv;
889 clear_rsv = 1;
890 }
891 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
892 if (clear_rsv)
893 trans->block_rsv = rsv;
894 if (IS_ERR(reloc_root))
895 return PTR_ERR(reloc_root);
896
897 ret = __add_reloc_root(reloc_root);
898 ASSERT(ret != -EEXIST);
899 if (ret) {
900 /* Pairs with create_reloc_root */
901 btrfs_put_root(reloc_root);
902 return ret;
903 }
904 root->reloc_root = btrfs_grab_root(reloc_root);
905 return 0;
906}
907
908/*
909 * update root item of reloc tree
910 */
911int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
912 struct btrfs_root *root)
913{
914 struct btrfs_fs_info *fs_info = root->fs_info;
915 struct btrfs_root *reloc_root;
916 struct btrfs_root_item *root_item;
917 int ret;
918
919 if (!have_reloc_root(root))
920 return 0;
921
922 reloc_root = root->reloc_root;
923 root_item = &reloc_root->root_item;
924
925 /*
926 * We are probably ok here, but __del_reloc_root() will drop its ref of
927 * the root. We have the ref for root->reloc_root, but just in case
928 * hold it while we update the reloc root.
929 */
930 btrfs_grab_root(reloc_root);
931
932 /* root->reloc_root will stay until current relocation finished */
933 if (fs_info->reloc_ctl->merge_reloc_tree &&
934 btrfs_root_refs(root_item) == 0) {
935 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
936 /*
937 * Mark the tree as dead before we change reloc_root so
938 * have_reloc_root will not touch it from now on.
939 */
940 smp_wmb();
941 __del_reloc_root(reloc_root);
942 }
943
944 if (reloc_root->commit_root != reloc_root->node) {
945 __update_reloc_root(reloc_root);
946 btrfs_set_root_node(root_item, reloc_root->node);
947 free_extent_buffer(reloc_root->commit_root);
948 reloc_root->commit_root = btrfs_root_node(reloc_root);
949 }
950
951 ret = btrfs_update_root(trans, fs_info->tree_root,
952 &reloc_root->root_key, root_item);
953 btrfs_put_root(reloc_root);
954 return ret;
955}
956
957/*
958 * helper to find first cached inode with inode number >= objectid
959 * in a subvolume
960 */
961static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
962{
963 struct rb_node *node;
964 struct rb_node *prev;
965 struct btrfs_inode *entry;
966 struct inode *inode;
967
968 spin_lock(&root->inode_lock);
969again:
970 node = root->inode_tree.rb_node;
971 prev = NULL;
972 while (node) {
973 prev = node;
974 entry = rb_entry(node, struct btrfs_inode, rb_node);
975
976 if (objectid < btrfs_ino(entry))
977 node = node->rb_left;
978 else if (objectid > btrfs_ino(entry))
979 node = node->rb_right;
980 else
981 break;
982 }
983 if (!node) {
984 while (prev) {
985 entry = rb_entry(prev, struct btrfs_inode, rb_node);
986 if (objectid <= btrfs_ino(entry)) {
987 node = prev;
988 break;
989 }
990 prev = rb_next(prev);
991 }
992 }
993 while (node) {
994 entry = rb_entry(node, struct btrfs_inode, rb_node);
995 inode = igrab(&entry->vfs_inode);
996 if (inode) {
997 spin_unlock(&root->inode_lock);
998 return inode;
999 }
1000
1001 objectid = btrfs_ino(entry) + 1;
1002 if (cond_resched_lock(&root->inode_lock))
1003 goto again;
1004
1005 node = rb_next(node);
1006 }
1007 spin_unlock(&root->inode_lock);
1008 return NULL;
1009}
1010
1011/*
1012 * get new location of data
1013 */
1014static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1015 u64 bytenr, u64 num_bytes)
1016{
1017 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1018 struct btrfs_path *path;
1019 struct btrfs_file_extent_item *fi;
1020 struct extent_buffer *leaf;
1021 int ret;
1022
1023 path = btrfs_alloc_path();
1024 if (!path)
1025 return -ENOMEM;
1026
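	/*
	 * Convert the old extent bytenr into a file offset in the data
	 * reloc inode; relocated data lives there at old_bytenr - index_cnt.
	 */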
1027 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1028 ret = btrfs_lookup_file_extent(NULL, root, path,
1029 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1030 if (ret < 0)
1031 goto out;
1032 if (ret > 0) {
1033 ret = -ENOENT;
1034 goto out;
1035 }
1036
1037 leaf = path->nodes[0];
1038 fi = btrfs_item_ptr(leaf, path->slots[0],
1039 struct btrfs_file_extent_item);
1040
1041 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1042 btrfs_file_extent_compression(leaf, fi) ||
1043 btrfs_file_extent_encryption(leaf, fi) ||
1044 btrfs_file_extent_other_encoding(leaf, fi));
1045
1046 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1047 ret = -EINVAL;
1048 goto out;
1049 }
1050
1051 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1052 ret = 0;
1053out:
1054 btrfs_free_path(path);
1055 return ret;
1056}
1057
1058/*
1059 * update file extent items in the tree leaf to point to
1060 * the new locations.
1061 */
1062static noinline_for_stack
1063int replace_file_extents(struct btrfs_trans_handle *trans,
1064 struct reloc_control *rc,
1065 struct btrfs_root *root,
1066 struct extent_buffer *leaf)
1067{
1068 struct btrfs_fs_info *fs_info = root->fs_info;
1069 struct btrfs_key key;
1070 struct btrfs_file_extent_item *fi;
1071 struct inode *inode = NULL;
1072 u64 parent;
1073 u64 bytenr;
1074 u64 new_bytenr = 0;
1075 u64 num_bytes;
1076 u64 end;
1077 u32 nritems;
1078 u32 i;
1079 int ret = 0;
1080 int first = 1;
1081 int dirty = 0;
1082
1083 if (rc->stage != UPDATE_DATA_PTRS)
1084 return 0;
1085
1086 /* reloc trees always use full backref */
1087 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1088 parent = leaf->start;
1089 else
1090 parent = 0;
1091
1092 nritems = btrfs_header_nritems(leaf);
1093 for (i = 0; i < nritems; i++) {
1094 struct btrfs_ref ref = { 0 };
1095
1096 cond_resched();
1097 btrfs_item_key_to_cpu(leaf, &key, i);
1098 if (key.type != BTRFS_EXTENT_DATA_KEY)
1099 continue;
1100 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1101 if (btrfs_file_extent_type(leaf, fi) ==
1102 BTRFS_FILE_EXTENT_INLINE)
1103 continue;
1104 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1105 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1106 if (bytenr == 0)
1107 continue;
1108 if (!in_range(bytenr, rc->block_group->start,
1109 rc->block_group->length))
1110 continue;
1111
1112 /*
1113 * if we are modifying block in fs tree, wait for read_folio
1114 * to complete and drop the extent cache
1115 */
1116 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1117 if (first) {
1118 inode = find_next_inode(root, key.objectid);
1119 first = 0;
1120 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1121 btrfs_add_delayed_iput(BTRFS_I(inode));
1122 inode = find_next_inode(root, key.objectid);
1123 }
1124 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1125 struct extent_state *cached_state = NULL;
1126
1127 end = key.offset +
1128 btrfs_file_extent_num_bytes(leaf, fi);
1129 WARN_ON(!IS_ALIGNED(key.offset,
1130 fs_info->sectorsize));
1131 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1132 end--;
1133 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1134 key.offset, end,
1135 &cached_state);
1136 if (!ret)
1137 continue;
1138
1139 btrfs_drop_extent_map_range(BTRFS_I(inode),
1140 key.offset, end, true);
1141 unlock_extent(&BTRFS_I(inode)->io_tree,
1142 key.offset, end, &cached_state);
1143 }
1144 }
1145
1146 ret = get_new_location(rc->data_inode, &new_bytenr,
1147 bytenr, num_bytes);
1148 if (ret) {
1149 /*
1150 * Don't have to abort since we've not changed anything
1151 * in the file extent yet.
1152 */
1153 break;
1154 }
1155
1156 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1157 dirty = 1;
1158
1159 key.offset -= btrfs_file_extent_offset(leaf, fi);
1160 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1161 num_bytes, parent);
1162 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1163 key.objectid, key.offset,
1164 root->root_key.objectid, false);
1165 ret = btrfs_inc_extent_ref(trans, &ref);
1166 if (ret) {
1167 btrfs_abort_transaction(trans, ret);
1168 break;
1169 }
1170
1171 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1172 num_bytes, parent);
1173 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1174 key.objectid, key.offset,
1175 root->root_key.objectid, false);
1176 ret = btrfs_free_extent(trans, &ref);
1177 if (ret) {
1178 btrfs_abort_transaction(trans, ret);
1179 break;
1180 }
1181 }
1182 if (dirty)
1183 btrfs_mark_buffer_dirty(leaf);
1184 if (inode)
1185 btrfs_add_delayed_iput(BTRFS_I(inode));
1186 return ret;
1187}
1188
1189static noinline_for_stack
1190int memcmp_node_keys(struct extent_buffer *eb, int slot,
1191 struct btrfs_path *path, int level)
1192{
1193 struct btrfs_disk_key key1;
1194 struct btrfs_disk_key key2;
1195 btrfs_node_key(eb, &key1, slot);
1196 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1197 return memcmp(&key1, &key2, sizeof(key1));
1198}
1199
1200/*
1201 * try to replace tree blocks in fs tree with the new blocks
1202 * in reloc tree. tree blocks that haven't been modified since the
1203 * reloc tree was created can be replaced.
1204 *
1205 * if a block was replaced, level of the block + 1 is returned.
1206 * if no block got replaced, 0 is returned. if there are other
1207 * errors, a negative error number is returned.
1208 */
1209static noinline_for_stack
1210int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1211 struct btrfs_root *dest, struct btrfs_root *src,
1212 struct btrfs_path *path, struct btrfs_key *next_key,
1213 int lowest_level, int max_level)
1214{
1215 struct btrfs_fs_info *fs_info = dest->fs_info;
1216 struct extent_buffer *eb;
1217 struct extent_buffer *parent;
1218 struct btrfs_ref ref = { 0 };
1219 struct btrfs_key key;
1220 u64 old_bytenr;
1221 u64 new_bytenr;
1222 u64 old_ptr_gen;
1223 u64 new_ptr_gen;
1224 u64 last_snapshot;
1225 u32 blocksize;
1226 int cow = 0;
1227 int level;
1228 int ret;
1229 int slot;
1230
1231 ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1232 ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1233
1234 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1235again:
1236 slot = path->slots[lowest_level];
1237 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1238
1239 eb = btrfs_lock_root_node(dest);
1240 level = btrfs_header_level(eb);
1241
1242 if (level < lowest_level) {
1243 btrfs_tree_unlock(eb);
1244 free_extent_buffer(eb);
1245 return 0;
1246 }
1247
1248 if (cow) {
1249 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1250 BTRFS_NESTING_COW);
1251 if (ret) {
1252 btrfs_tree_unlock(eb);
1253 free_extent_buffer(eb);
1254 return ret;
1255 }
1256 }
1257
1258 if (next_key) {
1259 next_key->objectid = (u64)-1;
1260 next_key->type = (u8)-1;
1261 next_key->offset = (u64)-1;
1262 }
1263
1264 parent = eb;
1265 while (1) {
1266 level = btrfs_header_level(parent);
1267 ASSERT(level >= lowest_level);
1268
1269 ret = btrfs_bin_search(parent, &key, &slot);
1270 if (ret < 0)
1271 break;
1272 if (ret && slot > 0)
1273 slot--;
1274
1275 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1276 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1277
1278 old_bytenr = btrfs_node_blockptr(parent, slot);
1279 blocksize = fs_info->nodesize;
1280 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1281
1282 if (level <= max_level) {
1283 eb = path->nodes[level];
1284 new_bytenr = btrfs_node_blockptr(eb,
1285 path->slots[level]);
1286 new_ptr_gen = btrfs_node_ptr_generation(eb,
1287 path->slots[level]);
1288 } else {
1289 new_bytenr = 0;
1290 new_ptr_gen = 0;
1291 }
1292
1293 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1294 ret = level;
1295 break;
1296 }
1297
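		/*
		 * The fs tree block was modified after the reloc tree was
		 * created, or the reloc tree has nothing to offer at this
		 * level, so it cannot be swapped here; descend into the
		 * child (or stop if we are already at the lowest level).
		 */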
1298 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1299 memcmp_node_keys(parent, slot, path, level)) {
1300 if (level <= lowest_level) {
1301 ret = 0;
1302 break;
1303 }
1304
1305 eb = btrfs_read_node_slot(parent, slot);
1306 if (IS_ERR(eb)) {
1307 ret = PTR_ERR(eb);
1308 break;
1309 }
1310 btrfs_tree_lock(eb);
1311 if (cow) {
1312 ret = btrfs_cow_block(trans, dest, eb, parent,
1313 slot, &eb,
1314 BTRFS_NESTING_COW);
1315 if (ret) {
1316 btrfs_tree_unlock(eb);
1317 free_extent_buffer(eb);
1318 break;
1319 }
1320 }
1321
1322 btrfs_tree_unlock(parent);
1323 free_extent_buffer(parent);
1324
1325 parent = eb;
1326 continue;
1327 }
1328
1329 if (!cow) {
1330 btrfs_tree_unlock(parent);
1331 free_extent_buffer(parent);
1332 cow = 1;
1333 goto again;
1334 }
1335
1336 btrfs_node_key_to_cpu(path->nodes[level], &key,
1337 path->slots[level]);
1338 btrfs_release_path(path);
1339
1340 path->lowest_level = level;
1341 set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1342 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1343 clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1344 path->lowest_level = 0;
1345 if (ret) {
1346 if (ret > 0)
1347 ret = -ENOENT;
1348 break;
1349 }
1350
1351 /*
1352 * Inform qgroup to trace both subtrees.
1353 *
1354 * We must trace both trees.
1355 * 1) Tree reloc subtree
1356 * If not traced, we will leak data numbers
1357 * 2) Fs subtree
1358 * If not traced, we will double count old data
1359 *
1360 * We don't scan the subtree right now, but only record
1361 * the swapped tree blocks.
1362 * The real subtree rescan is delayed until we have new
1363 * CoW on the subtree root node before transaction commit.
1364 */
1365 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1366 rc->block_group, parent, slot,
1367 path->nodes[level], path->slots[level],
1368 last_snapshot);
1369 if (ret < 0)
1370 break;
1371 /*
1372 * swap blocks in fs tree and reloc tree.
1373 */
1374 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1375 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1376 btrfs_mark_buffer_dirty(parent);
1377
1378 btrfs_set_node_blockptr(path->nodes[level],
1379 path->slots[level], old_bytenr);
1380 btrfs_set_node_ptr_generation(path->nodes[level],
1381 path->slots[level], old_ptr_gen);
1382 btrfs_mark_buffer_dirty(path->nodes[level]);
1383
1384 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1385 blocksize, path->nodes[level]->start);
1386 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1387 0, true);
1388 ret = btrfs_inc_extent_ref(trans, &ref);
1389 if (ret) {
1390 btrfs_abort_transaction(trans, ret);
1391 break;
1392 }
1393 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1394 blocksize, 0);
1395 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
1396 true);
1397 ret = btrfs_inc_extent_ref(trans, &ref);
1398 if (ret) {
1399 btrfs_abort_transaction(trans, ret);
1400 break;
1401 }
1402
1403 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1404 blocksize, path->nodes[level]->start);
1405 btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1406 0, true);
1407 ret = btrfs_free_extent(trans, &ref);
1408 if (ret) {
1409 btrfs_abort_transaction(trans, ret);
1410 break;
1411 }
1412
1413 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1414 blocksize, 0);
1415 btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
1416 0, true);
1417 ret = btrfs_free_extent(trans, &ref);
1418 if (ret) {
1419 btrfs_abort_transaction(trans, ret);
1420 break;
1421 }
1422
1423 btrfs_unlock_up_safe(path, 0);
1424
1425 ret = level;
1426 break;
1427 }
1428 btrfs_tree_unlock(parent);
1429 free_extent_buffer(parent);
1430 return ret;
1431}
1432
1433/*
1434 * helper to find next relocated block in reloc tree
1435 */
1436static noinline_for_stack
1437int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1438 int *level)
1439{
1440 struct extent_buffer *eb;
1441 int i;
1442 u64 last_snapshot;
1443 u32 nritems;
1444
1445 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1446
1447 for (i = 0; i < *level; i++) {
1448 free_extent_buffer(path->nodes[i]);
1449 path->nodes[i] = NULL;
1450 }
1451
1452 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1453 eb = path->nodes[i];
1454 nritems = btrfs_header_nritems(eb);
1455 while (path->slots[i] + 1 < nritems) {
1456 path->slots[i]++;
1457 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1458 last_snapshot)
1459 continue;
1460
1461 *level = i;
1462 return 0;
1463 }
1464 free_extent_buffer(path->nodes[i]);
1465 path->nodes[i] = NULL;
1466 }
1467 return 1;
1468}
1469
1470/*
1471 * walk down reloc tree to find relocated block of lowest level
1472 */
1473static noinline_for_stack
1474int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1475 int *level)
1476{
1477 struct extent_buffer *eb = NULL;
1478 int i;
1479 u64 ptr_gen = 0;
1480 u64 last_snapshot;
1481 u32 nritems;
1482
1483 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1484
1485 for (i = *level; i > 0; i--) {
1486 eb = path->nodes[i];
1487 nritems = btrfs_header_nritems(eb);
1488 while (path->slots[i] < nritems) {
1489 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1490 if (ptr_gen > last_snapshot)
1491 break;
1492 path->slots[i]++;
1493 }
1494 if (path->slots[i] >= nritems) {
1495 if (i == *level)
1496 break;
1497 *level = i + 1;
1498 return 0;
1499 }
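		/*
		 * Stop above the leaf level; the caller swaps blocks via the
		 * pointers in this level 1 node rather than reading leaves.
		 */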
1500 if (i == 1) {
1501 *level = i;
1502 return 0;
1503 }
1504
1505 eb = btrfs_read_node_slot(eb, path->slots[i]);
1506 if (IS_ERR(eb))
1507 return PTR_ERR(eb);
1508 BUG_ON(btrfs_header_level(eb) != i - 1);
1509 path->nodes[i - 1] = eb;
1510 path->slots[i - 1] = 0;
1511 }
1512 return 1;
1513}
1514
1515/*
1516 * invalidate extent cache for file extents whose key in range of
1517 * [min_key, max_key)
1518 */
1519static int invalidate_extent_cache(struct btrfs_root *root,
1520 struct btrfs_key *min_key,
1521 struct btrfs_key *max_key)
1522{
1523 struct btrfs_fs_info *fs_info = root->fs_info;
1524 struct inode *inode = NULL;
1525 u64 objectid;
1526 u64 start, end;
1527 u64 ino;
1528
1529 objectid = min_key->objectid;
1530 while (1) {
1531 struct extent_state *cached_state = NULL;
1532
1533 cond_resched();
1534 iput(inode);
1535
1536 if (objectid > max_key->objectid)
1537 break;
1538
1539 inode = find_next_inode(root, objectid);
1540 if (!inode)
1541 break;
1542 ino = btrfs_ino(BTRFS_I(inode));
1543
1544 if (ino > max_key->objectid) {
1545 iput(inode);
1546 break;
1547 }
1548
1549 objectid = ino + 1;
1550 if (!S_ISREG(inode->i_mode))
1551 continue;
1552
1553 if (unlikely(min_key->objectid == ino)) {
1554 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1555 continue;
1556 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1557 start = 0;
1558 else {
1559 start = min_key->offset;
1560 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1561 }
1562 } else {
1563 start = 0;
1564 }
1565
1566 if (unlikely(max_key->objectid == ino)) {
1567 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1568 continue;
1569 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1570 end = (u64)-1;
1571 } else {
1572 if (max_key->offset == 0)
1573 continue;
1574 end = max_key->offset;
1575 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1576 end--;
1577 }
1578 } else {
1579 end = (u64)-1;
1580 }
1581
1582 /* the lock_extent waits for read_folio to complete */
1583 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1584 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
1585 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1586 }
1587 return 0;
1588}
1589
1590static int find_next_key(struct btrfs_path *path, int level,
1591 struct btrfs_key *key)
1592
1593{
1594 while (level < BTRFS_MAX_LEVEL) {
1595 if (!path->nodes[level])
1596 break;
1597 if (path->slots[level] + 1 <
1598 btrfs_header_nritems(path->nodes[level])) {
1599 btrfs_node_key_to_cpu(path->nodes[level], key,
1600 path->slots[level] + 1);
1601 return 0;
1602 }
1603 level++;
1604 }
1605 return 1;
1606}
1607
1608/*
1609 * Insert current subvolume into reloc_control::dirty_subvol_roots
1610 */
1611static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1612 struct reloc_control *rc,
1613 struct btrfs_root *root)
1614{
1615 struct btrfs_root *reloc_root = root->reloc_root;
1616 struct btrfs_root_item *reloc_root_item;
1617 int ret;
1618
1619 /* @root must be a subvolume tree root with a valid reloc tree */
1620 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1621 ASSERT(reloc_root);
1622
1623 reloc_root_item = &reloc_root->root_item;
1624 memset(&reloc_root_item->drop_progress, 0,
1625 sizeof(reloc_root_item->drop_progress));
1626 btrfs_set_root_drop_level(reloc_root_item, 0);
1627 btrfs_set_root_refs(reloc_root_item, 0);
1628 ret = btrfs_update_reloc_root(trans, root);
1629 if (ret)
1630 return ret;
1631
1632 if (list_empty(&root->reloc_dirty_list)) {
1633 btrfs_grab_root(root);
1634 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1635 }
1636
1637 return 0;
1638}
1639
1640static int clean_dirty_subvols(struct reloc_control *rc)
1641{
1642 struct btrfs_root *root;
1643 struct btrfs_root *next;
1644 int ret = 0;
1645 int ret2;
1646
1647 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1648 reloc_dirty_list) {
1649 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1650 /* Merged subvolume, cleanup its reloc root */
1651 struct btrfs_root *reloc_root = root->reloc_root;
1652
1653 list_del_init(&root->reloc_dirty_list);
1654 root->reloc_root = NULL;
1655 /*
1656 * Need barrier to ensure clear_bit() only happens after
1657 * root->reloc_root = NULL. Pairs with have_reloc_root.
1658 */
1659 smp_wmb();
1660 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1661 if (reloc_root) {
1662 /*
1663 * btrfs_drop_snapshot drops our ref we hold for
1664 * ->reloc_root. If it fails however we must
1665 * drop the ref ourselves.
1666 */
1667 ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1668 if (ret2 < 0) {
1669 btrfs_put_root(reloc_root);
1670 if (!ret)
1671 ret = ret2;
1672 }
1673 }
1674 btrfs_put_root(root);
1675 } else {
1676 /* Orphan reloc tree, just clean it up */
1677 ret2 = btrfs_drop_snapshot(root, 0, 1);
1678 if (ret2 < 0) {
1679 btrfs_put_root(root);
1680 if (!ret)
1681 ret = ret2;
1682 }
1683 }
1684 }
1685 return ret;
1686}
1687
1688/*
1689 * merge the relocated tree blocks in reloc tree with corresponding
1690 * fs tree.
1691 */
1692static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1693 struct btrfs_root *root)
1694{
1695 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1696 struct btrfs_key key;
1697 struct btrfs_key next_key;
1698 struct btrfs_trans_handle *trans = NULL;
1699 struct btrfs_root *reloc_root;
1700 struct btrfs_root_item *root_item;
1701 struct btrfs_path *path;
1702 struct extent_buffer *leaf;
1703 int reserve_level;
1704 int level;
1705 int max_level;
1706 int replaced = 0;
1707 int ret = 0;
1708 u32 min_reserved;
1709
1710 path = btrfs_alloc_path();
1711 if (!path)
1712 return -ENOMEM;
1713 path->reada = READA_FORWARD;
1714
1715 reloc_root = root->reloc_root;
1716 root_item = &reloc_root->root_item;
1717
1718 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1719 level = btrfs_root_level(root_item);
1720 atomic_inc(&reloc_root->node->refs);
1721 path->nodes[level] = reloc_root->node;
1722 path->slots[level] = 0;
1723 } else {
1724 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1725
1726 level = btrfs_root_drop_level(root_item);
1727 BUG_ON(level == 0);
1728 path->lowest_level = level;
1729 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1730 path->lowest_level = 0;
1731 if (ret < 0) {
1732 btrfs_free_path(path);
1733 return ret;
1734 }
1735
1736 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1737 path->slots[level]);
1738 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1739
1740 btrfs_unlock_up_safe(path, 0);
1741 }
1742
1743 /*
1744 * In merge_reloc_root(), we modify the upper level pointer to swap the
1745 * tree blocks between reloc tree and subvolume tree. Thus for tree
1746 * block COW, we COW at most from level 1 to root level for each tree.
1747 *
1748 * Thus the needed metadata size is at most root_level * nodesize,
1749 * multiplied by 2 since we have two trees to COW.
1750 */
1751 reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1752 min_reserved = fs_info->nodesize * reserve_level * 2;
1753 memset(&next_key, 0, sizeof(next_key));
1754
1755 while (1) {
1756 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1757 min_reserved,
1758 BTRFS_RESERVE_FLUSH_LIMIT);
1759 if (ret)
1760 goto out;
1761 trans = btrfs_start_transaction(root, 0);
1762 if (IS_ERR(trans)) {
1763 ret = PTR_ERR(trans);
1764 trans = NULL;
1765 goto out;
1766 }
1767
1768 /*
1769 * At this point we no longer have a reloc_control, so we can't
1770 * depend on btrfs_init_reloc_root to update our last_trans.
1771 *
1772 * But that's ok, we started the trans handle on our
1773 * corresponding fs_root, which means it's been added to the
1774 * dirty list. At commit time we'll still call
1775 * btrfs_update_reloc_root() and update our root item
1776 * appropriately.
1777 */
1778 reloc_root->last_trans = trans->transid;
1779 trans->block_rsv = rc->block_rsv;
1780
1781 replaced = 0;
1782 max_level = level;
1783
1784 ret = walk_down_reloc_tree(reloc_root, path, &level);
1785 if (ret < 0)
1786 goto out;
1787 if (ret > 0)
1788 break;
1789
1790 if (!find_next_key(path, level, &key) &&
1791 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1792 ret = 0;
1793 } else {
1794 ret = replace_path(trans, rc, root, reloc_root, path,
1795 &next_key, level, max_level);
1796 }
1797 if (ret < 0)
1798 goto out;
1799 if (ret > 0) {
1800 level = ret;
1801 btrfs_node_key_to_cpu(path->nodes[level], &key,
1802 path->slots[level]);
1803 replaced = 1;
1804 }
1805
1806 ret = walk_up_reloc_tree(reloc_root, path, &level);
1807 if (ret > 0)
1808 break;
1809
1810 BUG_ON(level == 0);
1811 /*
1812 * save the merging progress in the drop_progress.
1813 * this is OK since root refs == 1 in this case.
1814 */
1815 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1816 path->slots[level]);
1817 btrfs_set_root_drop_level(root_item, level);
1818
1819 btrfs_end_transaction_throttle(trans);
1820 trans = NULL;
1821
1822 btrfs_btree_balance_dirty(fs_info);
1823
1824 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1825 invalidate_extent_cache(root, &key, &next_key);
1826 }
1827
1828 /*
1829 * handle the case where only one block in the fs tree needs to be
1830 * relocated and that block is the tree root.
1831 */
1832 leaf = btrfs_lock_root_node(root);
1833 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1834 BTRFS_NESTING_COW);
1835 btrfs_tree_unlock(leaf);
1836 free_extent_buffer(leaf);
1837out:
1838 btrfs_free_path(path);
1839
1840 if (ret == 0) {
1841 ret = insert_dirty_subvol(trans, rc, root);
1842 if (ret)
1843 btrfs_abort_transaction(trans, ret);
1844 }
1845
1846 if (trans)
1847 btrfs_end_transaction_throttle(trans);
1848
1849 btrfs_btree_balance_dirty(fs_info);
1850
1851 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1852 invalidate_extent_cache(root, &key, &next_key);
1853
1854 return ret;
1855}
1856
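/*
 * Prepare to merge the reloc trees back into the fs trees: reserve metadata
 * space for the merge, set each reloc root's refs to 1 so that
 * btrfs_recover_relocation() knows to resume merging after a crash, update
 * the reloc root items and commit the transaction.
 */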
1857static noinline_for_stack
1858int prepare_to_merge(struct reloc_control *rc, int err)
1859{
1860 struct btrfs_root *root = rc->extent_root;
1861 struct btrfs_fs_info *fs_info = root->fs_info;
1862 struct btrfs_root *reloc_root;
1863 struct btrfs_trans_handle *trans;
1864 LIST_HEAD(reloc_roots);
1865 u64 num_bytes = 0;
1866 int ret;
1867
1868 mutex_lock(&fs_info->reloc_mutex);
1869 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1870 rc->merging_rsv_size += rc->nodes_relocated * 2;
1871 mutex_unlock(&fs_info->reloc_mutex);
1872
1873again:
1874 if (!err) {
1875 num_bytes = rc->merging_rsv_size;
1876 ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1877 BTRFS_RESERVE_FLUSH_ALL);
1878 if (ret)
1879 err = ret;
1880 }
1881
1882 trans = btrfs_join_transaction(rc->extent_root);
1883 if (IS_ERR(trans)) {
1884 if (!err)
1885 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1886 num_bytes, NULL);
1887 return PTR_ERR(trans);
1888 }
1889
1890 if (!err) {
1891 if (num_bytes != rc->merging_rsv_size) {
1892 btrfs_end_transaction(trans);
1893 btrfs_block_rsv_release(fs_info, rc->block_rsv,
1894 num_bytes, NULL);
1895 goto again;
1896 }
1897 }
1898
1899 rc->merge_reloc_tree = 1;
1900
1901 while (!list_empty(&rc->reloc_roots)) {
1902 reloc_root = list_entry(rc->reloc_roots.next,
1903 struct btrfs_root, root_list);
1904 list_del_init(&reloc_root->root_list);
1905
1906 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1907 false);
1908 if (IS_ERR(root)) {
1909 /*
1910 * Even if we have an error we need this reloc root
1911 * back on our list so we can clean up properly.
1912 */
1913 list_add(&reloc_root->root_list, &reloc_roots);
1914 btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1915 if (!err)
1916 err = PTR_ERR(root);
1917 break;
1918 }
1919 ASSERT(root->reloc_root == reloc_root);
1920
1921 /*
1922 * set reference count to 1, so btrfs_recover_relocation
1923 * knows it should resume merging
1924 */
1925 if (!err)
1926 btrfs_set_root_refs(&reloc_root->root_item, 1);
1927 ret = btrfs_update_reloc_root(trans, root);
1928
1929 /*
1930 * Even if we have an error we need this reloc root back on our
1931 * list so we can clean up properly.
1932 */
1933 list_add(&reloc_root->root_list, &reloc_roots);
1934 btrfs_put_root(root);
1935
1936 if (ret) {
1937 btrfs_abort_transaction(trans, ret);
1938 if (!err)
1939 err = ret;
1940 break;
1941 }
1942 }
1943
1944 list_splice(&reloc_roots, &rc->reloc_roots);
1945
1946 if (!err)
1947 err = btrfs_commit_transaction(trans);
1948 else
1949 btrfs_end_transaction(trans);
1950 return err;
1951}
1952
1953static noinline_for_stack
1954void free_reloc_roots(struct list_head *list)
1955{
1956 struct btrfs_root *reloc_root, *tmp;
1957
1958 list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1959 __del_reloc_root(reloc_root);
1960}
1961
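/*
 * Merge the reloc trees back: for each reloc root with refs > 0, merge it
 * into its fs tree via merge_reloc_root(); reloc roots with refs == 0 are
 * detached from their fs root and queued on dirty_subvol_roots for cleanup.
 */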
1962static noinline_for_stack
1963void merge_reloc_roots(struct reloc_control *rc)
1964{
1965 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1966 struct btrfs_root *root;
1967 struct btrfs_root *reloc_root;
1968 LIST_HEAD(reloc_roots);
1969 int found = 0;
1970 int ret = 0;
1971again:
1972 root = rc->extent_root;
1973
1974 /*
1975 * this serializes us with btrfs_record_root_in_transaction;
1976 * we have to make sure nobody is in the middle of
1977 * adding their roots to the list while we are
1978 * doing this splice.
1979 */
1980 mutex_lock(&fs_info->reloc_mutex);
1981 list_splice_init(&rc->reloc_roots, &reloc_roots);
1982 mutex_unlock(&fs_info->reloc_mutex);
1983
1984 while (!list_empty(&reloc_roots)) {
1985 found = 1;
1986 reloc_root = list_entry(reloc_roots.next,
1987 struct btrfs_root, root_list);
1988
1989 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1990 false);
1991 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1992 if (IS_ERR(root)) {
1993 /*
1994 * For recovery we read the fs roots on mount,
1995 * and if we didn't find the root then we marked
1996 * the reloc root as a garbage root. For normal
1997 * relocation obviously the root should exist in
1998 * memory. However there's no reason we can't
1999 * handle the error properly here just in case.
2000 */
2001 ASSERT(0);
2002 ret = PTR_ERR(root);
2003 goto out;
2004 }
2005 if (root->reloc_root != reloc_root) {
2006 /*
2007 * This is actually impossible without something
2008 * going really wrong (like weird race condition
2009 * or cosmic rays).
2010 */
2011 ASSERT(0);
2012 ret = -EINVAL;
2013 goto out;
2014 }
2015 ret = merge_reloc_root(rc, root);
2016 btrfs_put_root(root);
2017 if (ret) {
2018 if (list_empty(&reloc_root->root_list))
2019 list_add_tail(&reloc_root->root_list,
2020 &reloc_roots);
2021 goto out;
2022 }
2023 } else {
2024 if (!IS_ERR(root)) {
2025 if (root->reloc_root == reloc_root) {
2026 root->reloc_root = NULL;
2027 btrfs_put_root(reloc_root);
2028 }
2029 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2030 &root->state);
2031 btrfs_put_root(root);
2032 }
2033
2034 list_del_init(&reloc_root->root_list);
2035 /* Don't forget to queue this reloc root for cleanup */
2036 list_add_tail(&reloc_root->reloc_dirty_list,
2037 &rc->dirty_subvol_roots);
2038 }
2039 }
2040
2041 if (found) {
2042 found = 0;
2043 goto again;
2044 }
2045out:
2046 if (ret) {
2047 btrfs_handle_fs_error(fs_info, ret, NULL);
2048 free_reloc_roots(&reloc_roots);
2049
2050 /* new reloc root may be added */
2051 mutex_lock(&fs_info->reloc_mutex);
2052 list_splice_init(&rc->reloc_roots, &reloc_roots);
2053 mutex_unlock(&fs_info->reloc_mutex);
2054 free_reloc_roots(&reloc_roots);
2055 }
2056
2057 /*
2058 * We used to have
2059 *
2060 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2061 *
2062 * here, but it's wrong. If we fail to start the transaction in
2063 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2064 * have actually been removed from the reloc_root_tree rb tree. This is
2065 * fine because we're bailing here, and we hold a reference on the root
2066 * for the list that holds it, so these roots will be cleaned up when we
2067 * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
2068 * will be cleaned up on unmount.
2069 *
2070 * The remaining nodes will be cleaned up by free_reloc_control.
2071 */
2072}
2073
2074static void free_block_list(struct rb_root *blocks)
2075{
2076 struct tree_block *block;
2077 struct rb_node *rb_node;
2078 while ((rb_node = rb_first(blocks))) {
2079 block = rb_entry(rb_node, struct tree_block, rb_node);
2080 rb_erase(rb_node, blocks);
2081 kfree(block);
2082 }
2083}
2084
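/*
 * Record the fs root that owns @reloc_root in the current transaction.
 * Returns 0 immediately if the reloc root was already recorded in this
 * transaction.
 */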
2085static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2086 struct btrfs_root *reloc_root)
2087{
2088 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2089 struct btrfs_root *root;
2090 int ret;
2091
2092 if (reloc_root->last_trans == trans->transid)
2093 return 0;
2094
2095 root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2096
2097 /*
2098 * This should succeed, since we can't have a reloc root without having
2099 * already looked up the actual root and created the reloc root for this
2100 * root.
2101 *
2102 * However if there's some sort of corruption where we have a ref to a
2103 * reloc root without a corresponding root this could return ENOENT.
2104 */
2105 if (IS_ERR(root)) {
2106 ASSERT(0);
2107 return PTR_ERR(root);
2108 }
2109 if (root->reloc_root != reloc_root) {
2110 ASSERT(0);
2111 btrfs_err(fs_info,
2112 "root %llu has two reloc roots associated with it",
2113 reloc_root->root_key.offset);
2114 btrfs_put_root(root);
2115 return -EUCLEAN;
2116 }
2117 ret = btrfs_record_root_in_trans(trans, root);
2118 btrfs_put_root(root);
2119
2120 return ret;
2121}
2122
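/*
 * Walk up the backref edges from @node until a tree root block is reached
 * and return the reloc root to use when COWing the upper level block. The
 * backref nodes along the path are stashed in backref_cache.path[] for
 * btrfs_reloc_cow_block().
 */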
2123static noinline_for_stack
2124struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2125 struct reloc_control *rc,
2126 struct btrfs_backref_node *node,
2127 struct btrfs_backref_edge *edges[])
2128{
2129 struct btrfs_backref_node *next;
2130 struct btrfs_root *root;
2131 int index = 0;
2132 int ret;
2133
2134 next = node;
2135 while (1) {
2136 cond_resched();
2137 next = walk_up_backref(next, edges, &index);
2138 root = next->root;
2139
2140 /*
2141 * If there is no root, then our references for this block are
2142 * incomplete, as we should be able to walk all the way up to a
2143 * block that is owned by a root.
2144 *
2145 * This path is only for SHAREABLE roots, so if we come upon a
2146 * non-SHAREABLE root then we have backrefs that resolve
2147 * improperly.
2148 *
2149 * Both of these cases indicate file system corruption, or a bug
2150 * in the backref walking code.
2151 */
2152 if (!root) {
2153 ASSERT(0);
2154 btrfs_err(trans->fs_info,
2155 "bytenr %llu doesn't have a backref path ending in a root",
2156 node->bytenr);
2157 return ERR_PTR(-EUCLEAN);
2158 }
2159 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2160 ASSERT(0);
2161 btrfs_err(trans->fs_info,
2162 "bytenr %llu has multiple refs with one ending in a non-shareable root",
2163 node->bytenr);
2164 return ERR_PTR(-EUCLEAN);
2165 }
2166
2167 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2168 ret = record_reloc_root_in_trans(trans, root);
2169 if (ret)
2170 return ERR_PTR(ret);
2171 break;
2172 }
2173
2174 ret = btrfs_record_root_in_trans(trans, root);
2175 if (ret)
2176 return ERR_PTR(ret);
2177 root = root->reloc_root;
2178
2179 /*
2180 * We could have raced with another thread which failed, so
2181 * root->reloc_root may not be set; return ENOENT in this case.
2182 */
2183 if (!root)
2184 return ERR_PTR(-ENOENT);
2185
2186 if (next->new_bytenr != root->node->start) {
2187 /*
2188 * We just created the reloc root, so we shouldn't have
2189 * ->new_bytenr set and this shouldn't be in the changed
2190 * list. If it is then we have multiple roots pointing
2191 * at the same bytenr which indicates corruption, or
2192 * we've made a mistake in the backref walking code.
2193 */
2194 ASSERT(next->new_bytenr == 0);
2195 ASSERT(list_empty(&next->list));
2196 if (next->new_bytenr || !list_empty(&next->list)) {
2197 btrfs_err(trans->fs_info,
2198 "bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2199 node->bytenr, next->bytenr);
2200 return ERR_PTR(-EUCLEAN);
2201 }
2202
2203 next->new_bytenr = root->node->start;
2204 btrfs_put_root(next->root);
2205 next->root = btrfs_grab_root(root);
2206 ASSERT(next->root);
2207 list_add_tail(&next->list,
2208 &rc->backref_cache.changed);
2209 mark_block_processed(rc, next);
2210 break;
2211 }
2212
2213 WARN_ON(1);
2214 root = NULL;
2215 next = walk_down_backref(edges, &index);
2216 if (!next || next->level <= node->level)
2217 break;
2218 }
2219 if (!root) {
2220 /*
2221 * This can happen if there's fs corruption or if there's a bug
2222 * in the backref lookup code.
2223 */
2224 ASSERT(0);
2225 return ERR_PTR(-ENOENT);
2226 }
2227
2228 next = node;
2229 /* setup backref node path for btrfs_reloc_cow_block */
2230 while (1) {
2231 rc->backref_cache.path[next->level] = next;
2232 if (--index < 0)
2233 break;
2234 next = edges[index]->node[UPPER];
2235 }
2236 return root;
2237}
2238
2239/*
2240 * Select a tree root for relocation.
2241 *
2242 * Return NULL if the block is shareable but not the root block of its tree;
2243 * do_relocation() should be used in this case.
2244 *
2245 * Return a tree root pointer if the block is in a non-shareable tree, or if
2246 * it is the root block of a shareable tree. Return -ENOENT if the block is only the root of a reloc tree.
2247 */
2248static noinline_for_stack
2249struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2250{
2251 struct btrfs_backref_node *next;
2252 struct btrfs_root *root;
2253 struct btrfs_root *fs_root = NULL;
2254 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2255 int index = 0;
2256
2257 next = node;
2258 while (1) {
2259 cond_resched();
2260 next = walk_up_backref(next, edges, &index);
2261 root = next->root;
2262
2263 /*
2264 * This can occur if we have incomplete extent refs leading all
2265 * the way up a particular path, in this case return -EUCLEAN.
2266 */
2267 if (!root)
2268 return ERR_PTR(-EUCLEAN);
2269
2270 /* No other choice for non-shareable tree */
2271 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2272 return root;
2273
2274 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2275 fs_root = root;
2276
2277 if (next != node)
2278 return NULL;
2279
2280 next = walk_down_backref(edges, &index);
2281 if (!next || next->level <= node->level)
2282 break;
2283 }
2284
2285 if (!fs_root)
2286 return ERR_PTR(-ENOENT);
2287 return fs_root;
2288}
2289
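/*
 * Estimate the metadata size (in bytes) needed to relocate @node: one
 * nodesize for the block itself plus one for every unprocessed upper level
 * block reachable through the backref edges.
 */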
2290static noinline_for_stack
2291u64 calcu_metadata_size(struct reloc_control *rc,
2292 struct btrfs_backref_node *node, int reserve)
2293{
2294 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2295 struct btrfs_backref_node *next = node;
2296 struct btrfs_backref_edge *edge;
2297 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2298 u64 num_bytes = 0;
2299 int index = 0;
2300
2301 BUG_ON(reserve && node->processed);
2302
2303 while (next) {
2304 cond_resched();
2305 while (1) {
2306 if (next->processed && (reserve || next != node))
2307 break;
2308
2309 num_bytes += fs_info->nodesize;
2310
2311 if (list_empty(&next->upper))
2312 break;
2313
2314 edge = list_entry(next->upper.next,
2315 struct btrfs_backref_edge, list[LOWER]);
2316 edges[index++] = edge;
2317 next = edge->node[UPPER];
2318 }
2319 next = walk_down_backref(edges, &index);
2320 }
2321 return num_bytes;
2322}
2323
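/*
 * Reserve metadata space for relocating @node and the upper level blocks
 * referencing it. The estimate from calcu_metadata_size() is doubled to
 * cover COW of both the reloc tree and the subvolume tree.
 */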
2324static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2325 struct reloc_control *rc,
2326 struct btrfs_backref_node *node)
2327{
2328 struct btrfs_root *root = rc->extent_root;
2329 struct btrfs_fs_info *fs_info = root->fs_info;
2330 u64 num_bytes;
2331 int ret;
2332 u64 tmp;
2333
2334 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2335
2336 trans->block_rsv = rc->block_rsv;
2337 rc->reserved_bytes += num_bytes;
2338
2339 /*
2340 * We are under a transaction here so we can only do limited flushing.
2341 * If we get an enospc just kick back -EAGAIN so we know to drop the
2342 * transaction and try to refill when we can flush all the things.
2343 */
2344 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2345 BTRFS_RESERVE_FLUSH_LIMIT);
2346 if (ret) {
2347 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2348 while (tmp <= rc->reserved_bytes)
2349 tmp <<= 1;
2350 /*
2351 * only one thread can access block_rsv at this point,
2352 * so we don't need to hold a lock to protect block_rsv.
2353 * we expand the reservation size here to allow enough
2354 * space for relocation, and we will return early in the
2355 * enospc case.
2356 */
2357 rc->block_rsv->size = tmp + fs_info->nodesize *
2358 RELOCATION_RESERVED_NODES;
2359 return -EAGAIN;
2360 }
2361
2362 return 0;
2363}
2364
2365/*
2366 * relocate a tree block, and then update pointers in upper level
2367 * blocks that reference the block to point to the new location.
2368 *
2369 * if called by link_to_upper, the block has already been relocated.
2370 * in that case this function just updates pointers.
2371 */
2372static int do_relocation(struct btrfs_trans_handle *trans,
2373 struct reloc_control *rc,
2374 struct btrfs_backref_node *node,
2375 struct btrfs_key *key,
2376 struct btrfs_path *path, int lowest)
2377{
2378 struct btrfs_backref_node *upper;
2379 struct btrfs_backref_edge *edge;
2380 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2381 struct btrfs_root *root;
2382 struct extent_buffer *eb;
2383 u32 blocksize;
2384 u64 bytenr;
2385 int slot;
2386 int ret = 0;
2387
2388 /*
2389 * If we are lowest then this is the first time we're processing this
2390 * block, and thus shouldn't have an eb associated with it yet.
2391 */
2392 ASSERT(!lowest || !node->eb);
2393
2394 path->lowest_level = node->level + 1;
2395 rc->backref_cache.path[node->level] = node;
2396 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2397 struct btrfs_ref ref = { 0 };
2398
2399 cond_resched();
2400
2401 upper = edge->node[UPPER];
2402 root = select_reloc_root(trans, rc, upper, edges);
2403 if (IS_ERR(root)) {
2404 ret = PTR_ERR(root);
2405 goto next;
2406 }
2407
2408 if (upper->eb && !upper->locked) {
2409 if (!lowest) {
2410 ret = btrfs_bin_search(upper->eb, key, &slot);
2411 if (ret < 0)
2412 goto next;
2413 BUG_ON(ret);
2414 bytenr = btrfs_node_blockptr(upper->eb, slot);
2415 if (node->eb->start == bytenr)
2416 goto next;
2417 }
2418 btrfs_backref_drop_node_buffer(upper);
2419 }
2420
2421 if (!upper->eb) {
2422 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2423 if (ret) {
2424 if (ret > 0)
2425 ret = -ENOENT;
2426
2427 btrfs_release_path(path);
2428 break;
2429 }
2430
2431 if (!upper->eb) {
2432 upper->eb = path->nodes[upper->level];
2433 path->nodes[upper->level] = NULL;
2434 } else {
2435 BUG_ON(upper->eb != path->nodes[upper->level]);
2436 }
2437
2438 upper->locked = 1;
2439 path->locks[upper->level] = 0;
2440
2441 slot = path->slots[upper->level];
2442 btrfs_release_path(path);
2443 } else {
2444 ret = btrfs_bin_search(upper->eb, key, &slot);
2445 if (ret < 0)
2446 goto next;
2447 BUG_ON(ret);
2448 }
2449
2450 bytenr = btrfs_node_blockptr(upper->eb, slot);
2451 if (lowest) {
2452 if (bytenr != node->bytenr) {
2453 btrfs_err(root->fs_info,
2454 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2455 bytenr, node->bytenr, slot,
2456 upper->eb->start);
2457 ret = -EIO;
2458 goto next;
2459 }
2460 } else {
2461 if (node->eb->start == bytenr)
2462 goto next;
2463 }
2464
2465 blocksize = root->fs_info->nodesize;
2466 eb = btrfs_read_node_slot(upper->eb, slot);
2467 if (IS_ERR(eb)) {
2468 ret = PTR_ERR(eb);
2469 goto next;
2470 }
2471 btrfs_tree_lock(eb);
2472
2473 if (!node->eb) {
2474 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2475 slot, &eb, BTRFS_NESTING_COW);
2476 btrfs_tree_unlock(eb);
2477 free_extent_buffer(eb);
2478 if (ret < 0)
2479 goto next;
2480 /*
2481 * We've just COWed this block, it should have updated
2482 * the correct backref node entry.
2483 */
2484 ASSERT(node->eb == eb);
2485 } else {
2486 btrfs_set_node_blockptr(upper->eb, slot,
2487 node->eb->start);
2488 btrfs_set_node_ptr_generation(upper->eb, slot,
2489 trans->transid);
2490 btrfs_mark_buffer_dirty(upper->eb);
2491
2492 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2493 node->eb->start, blocksize,
2494 upper->eb->start);
2495 btrfs_init_tree_ref(&ref, node->level,
2496 btrfs_header_owner(upper->eb),
2497 root->root_key.objectid, false);
2498 ret = btrfs_inc_extent_ref(trans, &ref);
2499 if (!ret)
2500 ret = btrfs_drop_subtree(trans, root, eb,
2501 upper->eb);
2502 if (ret)
2503 btrfs_abort_transaction(trans, ret);
2504 }
2505next:
2506 if (!upper->pending)
2507 btrfs_backref_drop_node_buffer(upper);
2508 else
2509 btrfs_backref_unlock_node_buffer(upper);
2510 if (ret)
2511 break;
2512 }
2513
2514 if (!ret && node->pending) {
2515 btrfs_backref_drop_node_buffer(node);
2516 list_move_tail(&node->list, &rc->backref_cache.changed);
2517 node->pending = 0;
2518 }
2519
2520 path->lowest_level = 0;
2521
2522 /*
2523 * We should have allocated all of our space in the block rsv and thus
2524 * shouldn't ENOSPC.
2525 */
2526 ASSERT(ret != -ENOSPC);
2527 return ret;
2528}
2529
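/*
 * Update the pointers in the upper level blocks to point at the already
 * relocated block referenced by @node.
 */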
2530static int link_to_upper(struct btrfs_trans_handle *trans,
2531 struct reloc_control *rc,
2532 struct btrfs_backref_node *node,
2533 struct btrfs_path *path)
2534{
2535 struct btrfs_key key;
2536
2537 btrfs_node_key_to_cpu(node->eb, &key, 0);
2538 return do_relocation(trans, rc, node, &key, path, 0);
2539}
2540
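/*
 * Link every block on the per-level pending lists to its upper level
 * blocks, preserving the first error seen (or the error passed in via
 * @err).
 */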
2541static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2542 struct reloc_control *rc,
2543 struct btrfs_path *path, int err)
2544{
2545 LIST_HEAD(list);
2546 struct btrfs_backref_cache *cache = &rc->backref_cache;
2547 struct btrfs_backref_node *node;
2548 int level;
2549 int ret;
2550
2551 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2552 while (!list_empty(&cache->pending[level])) {
2553 node = list_entry(cache->pending[level].next,
2554 struct btrfs_backref_node, list);
2555 list_move_tail(&node->list, &list);
2556 BUG_ON(!node->pending);
2557
2558 if (!err) {
2559 ret = link_to_upper(trans, rc, node, path);
2560 if (ret < 0)
2561 err = ret;
2562 }
2563 }
2564 list_splice_init(&list, &cache->pending[level]);
2565 }
2566 return err;
2567}
2568
2569/*
2570 * mark a block and all blocks that directly/indirectly reference the
2571 * block as processed.
2572 */
2573static void update_processed_blocks(struct reloc_control *rc,
2574 struct btrfs_backref_node *node)
2575{
2576 struct btrfs_backref_node *next = node;
2577 struct btrfs_backref_edge *edge;
2578 struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2579 int index = 0;
2580
2581 while (next) {
2582 cond_resched();
2583 while (1) {
2584 if (next->processed)
2585 break;
2586
2587 mark_block_processed(rc, next);
2588
2589 if (list_empty(&next->upper))
2590 break;
2591
2592 edge = list_entry(next->upper.next,
2593 struct btrfs_backref_edge, list[LOWER]);
2594 edges[index++] = edge;
2595 next = edge->node[UPPER];
2596 }
2597 next = walk_down_backref(edges, &index);
2598 }
2599}
2600
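/* Return 1 if the tree block at @bytenr has already been processed. */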
2601static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2602{
2603 u32 blocksize = rc->extent_root->fs_info->nodesize;
2604
2605 if (test_range_bit(&rc->processed_blocks, bytenr,
2606 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2607 return 1;
2608 return 0;
2609}
2610
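/*
 * Read the tree block described by @block and record its first key in
 * @block->key, then mark the key as ready.
 */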
2611static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2612 struct tree_block *block)
2613{
2614 struct btrfs_tree_parent_check check = {
2615 .level = block->level,
2616 .owner_root = block->owner,
2617 .transid = block->key.offset
2618 };
2619 struct extent_buffer *eb;
2620
2621 eb = read_tree_block(fs_info, block->bytenr, &check);
2622 if (IS_ERR(eb))
2623 return PTR_ERR(eb);
2624 if (!extent_buffer_uptodate(eb)) {
2625 free_extent_buffer(eb);
2626 return -EIO;
2627 }
2628 if (block->level == 0)
2629 btrfs_item_key_to_cpu(eb, &block->key, 0);
2630 else
2631 btrfs_node_key_to_cpu(eb, &block->key, 0);
2632 free_extent_buffer(eb);
2633 block->key_ready = 1;
2634 return 0;
2635}
2636
2637/*
2638 * helper function to relocate a tree block
2639 */
2640static int relocate_tree_block(struct btrfs_trans_handle *trans,
2641 struct reloc_control *rc,
2642 struct btrfs_backref_node *node,
2643 struct btrfs_key *key,
2644 struct btrfs_path *path)
2645{
2646 struct btrfs_root *root;
2647 int ret = 0;
2648
2649 if (!node)
2650 return 0;
2651
2652 /*
2653 * If we fail here we want to drop our backref_node because we are going
2654 * to start over and regenerate the tree for it.
2655 */
2656 ret = reserve_metadata_space(trans, rc, node);
2657 if (ret)
2658 goto out;
2659
2660 BUG_ON(node->processed);
2661 root = select_one_root(node);
2662 if (IS_ERR(root)) {
2663 ret = PTR_ERR(root);
2664
2665 /* See explanation in select_one_root for the -EUCLEAN case. */
2666 ASSERT(ret == -ENOENT);
2667 if (ret == -ENOENT) {
2668 ret = 0;
2669 update_processed_blocks(rc, node);
2670 }
2671 goto out;
2672 }
2673
2674 if (root) {
2675 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2676 /*
2677 * This block was the root block of a root, and this is
2678 * the first time we're processing the block and thus it
2679 * should not have had the ->new_bytenr modified and
2680 * should have not been included on the changed list.
2681 *
2682 * However in the case of corruption we could have
2683 * multiple refs pointing to the same block improperly,
2684 * and thus we would trip over these checks. ASSERT()
2685 * for the developer case, because it could indicate a
2686 * bug in the backref code, however error out for a
2687 * normal user in the case of corruption.
2688 */
2689 ASSERT(node->new_bytenr == 0);
2690 ASSERT(list_empty(&node->list));
2691 if (node->new_bytenr || !list_empty(&node->list)) {
2692 btrfs_err(root->fs_info,
2693 "bytenr %llu has improper references to it",
2694 node->bytenr);
2695 ret = -EUCLEAN;
2696 goto out;
2697 }
2698 ret = btrfs_record_root_in_trans(trans, root);
2699 if (ret)
2700 goto out;
2701 /*
2702 * Another thread could have failed, need to check if we
2703 * have reloc_root actually set.
2704 */
2705 if (!root->reloc_root) {
2706 ret = -ENOENT;
2707 goto out;
2708 }
2709 root = root->reloc_root;
2710 node->new_bytenr = root->node->start;
2711 btrfs_put_root(node->root);
2712 node->root = btrfs_grab_root(root);
2713 ASSERT(node->root);
2714 list_add_tail(&node->list, &rc->backref_cache.changed);
2715 } else {
2716 path->lowest_level = node->level;
2717 if (root == root->fs_info->chunk_root)
2718 btrfs_reserve_chunk_metadata(trans, false);
2719 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2720 btrfs_release_path(path);
2721 if (root == root->fs_info->chunk_root)
2722 btrfs_trans_release_chunk_metadata(trans);
2723 if (ret > 0)
2724 ret = 0;
2725 }
2726 if (!ret)
2727 update_processed_blocks(rc, node);
2728 } else {
2729 ret = do_relocation(trans, rc, node, key, path, 1);
2730 }
2731out:
2732 if (ret || node->level == 0 || node->cowonly)
2733 btrfs_backref_cleanup_node(&rc->backref_cache, node);
2734 return ret;
2735}
2736
2737/*
2738 * relocate a list of blocks
2739 */
2740static noinline_for_stack
2741int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2742 struct reloc_control *rc, struct rb_root *blocks)
2743{
2744 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2745 struct btrfs_backref_node *node;
2746 struct btrfs_path *path;
2747 struct tree_block *block;
2748 struct tree_block *next;
2749 int ret;
2750 int err = 0;
2751
2752 path = btrfs_alloc_path();
2753 if (!path) {
2754 err = -ENOMEM;
2755 goto out_free_blocks;
2756 }
2757
2758 /* Kick in readahead for tree blocks with missing keys */
2759 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2760 if (!block->key_ready)
2761 btrfs_readahead_tree_block(fs_info, block->bytenr,
2762 block->owner, 0,
2763 block->level);
2764 }
2765
2766 /* Get first keys */
2767 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2768 if (!block->key_ready) {
2769 err = get_tree_block_key(fs_info, block);
2770 if (err)
2771 goto out_free_path;
2772 }
2773 }
2774
2775 /* Do tree relocation */
2776 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2777 node = build_backref_tree(rc, &block->key,
2778 block->level, block->bytenr);
2779 if (IS_ERR(node)) {
2780 err = PTR_ERR(node);
2781 goto out;
2782 }
2783
2784 ret = relocate_tree_block(trans, rc, node, &block->key,
2785 path);
2786 if (ret < 0) {
2787 err = ret;
2788 break;
2789 }
2790 }
2791out:
2792 err = finish_pending_nodes(trans, rc, path, err);
2793
2794out_free_path:
2795 btrfs_free_path(path);
2796out_free_blocks:
2797 free_block_list(blocks);
2798 return err;
2799}
2800
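/*
 * Preallocate file extents in the data reloc inode covering the whole
 * cluster. For subpage filesystems the tail page beyond the old i_size is
 * invalidated first so stale padding zeros are not written back as data.
 */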
2801static noinline_for_stack int prealloc_file_extent_cluster(
2802 struct btrfs_inode *inode,
2803 struct file_extent_cluster *cluster)
2804{
2805 u64 alloc_hint = 0;
2806 u64 start;
2807 u64 end;
2808 u64 offset = inode->index_cnt;
2809 u64 num_bytes;
2810 int nr;
2811 int ret = 0;
2812 u64 i_size = i_size_read(&inode->vfs_inode);
2813 u64 prealloc_start = cluster->start - offset;
2814 u64 prealloc_end = cluster->end - offset;
2815 u64 cur_offset = prealloc_start;
2816
2817 /*
2818 * For the subpage case, the previous i_size may not be aligned to PAGE_SIZE.
2819 * This means the range [i_size, PAGE_END + 1) was filled with zeros by the
2820 * btrfs_do_readpage() call for the previously relocated file cluster.
2821 *
2822 * If the current cluster starts in the above range, btrfs_do_readpage()
2823 * will skip the read, and relocate_one_page() will later write back
2824 * the padding zeros as new data, causing data corruption.
2825 *
2826 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
2827 */
2828 if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
2829 struct address_space *mapping = inode->vfs_inode.i_mapping;
2830 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2831 const u32 sectorsize = fs_info->sectorsize;
2832 struct page *page;
2833
2834 ASSERT(sectorsize < PAGE_SIZE);
2835 ASSERT(IS_ALIGNED(i_size, sectorsize));
2836
2837 /*
2838 * Subpage can't handle page with DIRTY but without UPTODATE
2839 * bit as it can lead to the following deadlock:
2840 *
2841 * btrfs_read_folio()
2842 * | Page already *locked*
2843 * |- btrfs_lock_and_flush_ordered_range()
2844 * |- btrfs_start_ordered_extent()
2845 * |- extent_write_cache_pages()
2846 * |- lock_page()
2847 * We try to lock the page we already hold.
2848 *
2849 * Here we just write back the whole data reloc inode, so that
2850 * we are ensured to have no dirty range in the page, and
2851 * are safe to clear the uptodate bits.
2852 *
2853 * This shouldn't cause too much overhead, as we need to write
2854 * the data back anyway.
2855 */
2856 ret = filemap_write_and_wait(mapping);
2857 if (ret < 0)
2858 return ret;
2859
2860 clear_extent_bits(&inode->io_tree, i_size,
2861 round_up(i_size, PAGE_SIZE) - 1,
2862 EXTENT_UPTODATE);
2863 page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2864 /*
2865 * If page is freed we don't need to do anything then, as we
2866 * will re-read the whole page anyway.
2867 */
2868 if (page) {
2869 btrfs_subpage_clear_uptodate(fs_info, page, i_size,
2870 round_up(i_size, PAGE_SIZE) - i_size);
2871 unlock_page(page);
2872 put_page(page);
2873 }
2874 }
2875
2876 BUG_ON(cluster->start != cluster->boundary[0]);
2877 ret = btrfs_alloc_data_chunk_ondemand(inode,
2878 prealloc_end + 1 - prealloc_start);
2879 if (ret)
2880 return ret;
2881
2882 btrfs_inode_lock(inode, 0);
2883 for (nr = 0; nr < cluster->nr; nr++) {
2884 struct extent_state *cached_state = NULL;
2885
2886 start = cluster->boundary[nr] - offset;
2887 if (nr + 1 < cluster->nr)
2888 end = cluster->boundary[nr + 1] - 1 - offset;
2889 else
2890 end = cluster->end - offset;
2891
2892 lock_extent(&inode->io_tree, start, end, &cached_state);
2893 num_bytes = end + 1 - start;
2894 ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2895 num_bytes, num_bytes,
2896 end + 1, &alloc_hint);
2897 cur_offset = end + 1;
2898 unlock_extent(&inode->io_tree, start, end, &cached_state);
2899 if (ret)
2900 break;
2901 }
2902 btrfs_inode_unlock(inode, 0);
2903
2904 if (cur_offset < prealloc_end)
2905 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2906 prealloc_end + 1 - cur_offset);
2907 return ret;
2908}
2909
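/*
 * Insert a pinned extent map covering [start, end] that points at
 * @block_start, replacing any existing mapping for that file range.
 */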
2910static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2911 u64 start, u64 end, u64 block_start)
2912{
2913 struct extent_map *em;
2914 struct extent_state *cached_state = NULL;
2915 int ret = 0;
2916
2917 em = alloc_extent_map();
2918 if (!em)
2919 return -ENOMEM;
2920
2921 em->start = start;
2922 em->len = end + 1 - start;
2923 em->block_len = em->len;
2924 em->block_start = block_start;
2925 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2926
2927 lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2928 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2929 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2930 free_extent_map(em);
2931
2932 return ret;
2933}
2934
2935/*
2936 * Allow error injection to test balance/relocation cancellation
2937 */
2938noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2939{
2940 return atomic_read(&fs_info->balance_cancel_req) ||
2941 atomic_read(&fs_info->reloc_cancel_req) ||
2942 fatal_signal_pending(current);
2943}
2944ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2945
2946static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
2947 int cluster_nr)
2948{
2949 /* Last extent, use cluster end directly */
2950 if (cluster_nr >= cluster->nr - 1)
2951 return cluster->end;
2952
2953 /* Otherwise, use the byte just before the next boundary's start */
2954 return cluster->boundary[cluster_nr + 1] - 1;
2955}
2956
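/*
 * Relocate the data covered by one page: read the page if needed, then for
 * each extent range inside it reserve metadata, mark the range delalloc and
 * dirty, and set EXTENT_BOUNDARY at each extent boundary so the relocated
 * extents keep the same size as the source extents.
 */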
2957static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2958 struct file_extent_cluster *cluster,
2959 int *cluster_nr, unsigned long page_index)
2960{
2961 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2962 u64 offset = BTRFS_I(inode)->index_cnt;
2963 const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2964 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2965 struct page *page;
2966 u64 page_start;
2967 u64 page_end;
2968 u64 cur;
2969 int ret;
2970
2971 ASSERT(page_index <= last_index);
2972 page = find_lock_page(inode->i_mapping, page_index);
2973 if (!page) {
2974 page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2975 page_index, last_index + 1 - page_index);
2976 page = find_or_create_page(inode->i_mapping, page_index, mask);
2977 if (!page)
2978 return -ENOMEM;
2979 }
2980 ret = set_page_extent_mapped(page);
2981 if (ret < 0)
2982 goto release_page;
2983
2984 if (PageReadahead(page))
2985 page_cache_async_readahead(inode->i_mapping, ra, NULL,
2986 page_folio(page), page_index,
2987 last_index + 1 - page_index);
2988
2989 if (!PageUptodate(page)) {
2990 btrfs_read_folio(NULL, page_folio(page));
2991 lock_page(page);
2992 if (!PageUptodate(page)) {
2993 ret = -EIO;
2994 goto release_page;
2995 }
2996 }
2997
2998 page_start = page_offset(page);
2999 page_end = page_start + PAGE_SIZE - 1;
3000
3001 /*
3002 * Start from the cluster boundary, as in the subpage case the cluster
3003 * can start inside the page.
3004 */
3005 cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
3006 while (cur <= page_end) {
3007 struct extent_state *cached_state = NULL;
3008 u64 extent_start = cluster->boundary[*cluster_nr] - offset;
3009 u64 extent_end = get_cluster_boundary_end(cluster,
3010 *cluster_nr) - offset;
3011 u64 clamped_start = max(page_start, extent_start);
3012 u64 clamped_end = min(page_end, extent_end);
3013 u32 clamped_len = clamped_end + 1 - clamped_start;
3014
3015 /* Reserve metadata for this range */
3016 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3017 clamped_len, clamped_len,
3018 false);
3019 if (ret)
3020 goto release_page;
3021
3022 /* Mark the range delalloc and dirty for later writeback */
3023 lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3024 &cached_state);
3025 ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3026 clamped_end, 0, &cached_state);
3027 if (ret) {
3028 clear_extent_bit(&BTRFS_I(inode)->io_tree,
3029 clamped_start, clamped_end,
3030 EXTENT_LOCKED | EXTENT_BOUNDARY,
3031 &cached_state);
3032 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3033 clamped_len, true);
3034 btrfs_delalloc_release_extents(BTRFS_I(inode),
3035 clamped_len);
3036 goto release_page;
3037 }
3038 btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
3039
3040 /*
3041 * Set the boundary if it's inside the page.
3042 * Data relocation requires the destination extents to have the
3043 * same size as the source.
3044 * EXTENT_BOUNDARY bit prevents current extent from being merged
3045 * with previous extent.
3046 */
3047 if (in_range(cluster->boundary[*cluster_nr] - offset,
3048 page_start, PAGE_SIZE)) {
3049 u64 boundary_start = cluster->boundary[*cluster_nr] -
3050 offset;
3051 u64 boundary_end = boundary_start +
3052 fs_info->sectorsize - 1;
3053
3054 set_extent_bits(&BTRFS_I(inode)->io_tree,
3055 boundary_start, boundary_end,
3056 EXTENT_BOUNDARY);
3057 }
3058 unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3059 &cached_state);
3060 btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3061 cur += clamped_len;
3062
3063 /* Crossed extent end, go to next extent */
3064 if (cur >= extent_end) {
3065 (*cluster_nr)++;
3066 /* Just finished the last extent of the cluster, exit. */
3067 if (*cluster_nr >= cluster->nr)
3068 break;
3069 }
3070 }
3071 unlock_page(page);
3072 put_page(page);
3073
3074 balance_dirty_pages_ratelimited(inode->i_mapping);
3075 btrfs_throttle(fs_info);
3076 if (btrfs_should_cancel_balance(fs_info))
3077 ret = -ECANCELED;
3078 return ret;
3079
3080release_page:
3081 unlock_page(page);
3082 put_page(page);
3083 return ret;
3084}
3085
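/*
 * Relocate one cluster of file extents: preallocate the destination space,
 * map the cluster's file range to the old data location, then dirty every
 * page in the range for later writeback.
 */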
3086static int relocate_file_extent_cluster(struct inode *inode,
3087 struct file_extent_cluster *cluster)
3088{
3089 u64 offset = BTRFS_I(inode)->index_cnt;
3090 unsigned long index;
3091 unsigned long last_index;
3092 struct file_ra_state *ra;
3093 int cluster_nr = 0;
3094 int ret = 0;
3095
3096 if (!cluster->nr)
3097 return 0;
3098
3099 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3100 if (!ra)
3101 return -ENOMEM;
3102
3103 ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3104 if (ret)
3105 goto out;
3106
3107 file_ra_state_init(ra, inode->i_mapping);
3108
3109 ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3110 cluster->end - offset, cluster->start);
3111 if (ret)
3112 goto out;
3113
3114 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3115 for (index = (cluster->start - offset) >> PAGE_SHIFT;
3116 index <= last_index && !ret; index++)
3117 ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3118 if (ret == 0)
3119 WARN_ON(cluster_nr != cluster->nr);
3120out:
3121 kfree(ra);
3122 return ret;
3123}
3124
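/*
 * Add a data extent to the current cluster, flushing the cluster via
 * relocate_file_extent_cluster() when it is full or when the new extent is
 * not contiguous with it.
 */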
3125static noinline_for_stack
3126int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3127 struct file_extent_cluster *cluster)
3128{
3129 int ret;
3130
3131 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3132 ret = relocate_file_extent_cluster(inode, cluster);
3133 if (ret)
3134 return ret;
3135 cluster->nr = 0;
3136 }
3137
3138 if (!cluster->nr)
3139 cluster->start = extent_key->objectid;
3140 else
3141 BUG_ON(cluster->nr >= MAX_EXTENTS);
3142 cluster->end = extent_key->objectid + extent_key->offset - 1;
3143 cluster->boundary[cluster->nr] = extent_key->objectid;
3144 cluster->nr++;
3145
3146 if (cluster->nr >= MAX_EXTENTS) {
3147 ret = relocate_file_extent_cluster(inode, cluster);
3148 if (ret)
3149 return ret;
3150 cluster->nr = 0;
3151 }
3152 return 0;
3153}
3154
3155/*
3156 * helper to add a tree block to the list.
3157 * the major work is getting the generation and level of the block
3158 */
3159static int add_tree_block(struct reloc_control *rc,
3160 struct btrfs_key *extent_key,
3161 struct btrfs_path *path,
3162 struct rb_root *blocks)
3163{
3164 struct extent_buffer *eb;
3165 struct btrfs_extent_item *ei;
3166 struct btrfs_tree_block_info *bi;
3167 struct tree_block *block;
3168 struct rb_node *rb_node;
3169 u32 item_size;
3170 int level = -1;
3171 u64 generation;
3172 u64 owner = 0;
3173
3174 eb = path->nodes[0];
3175 item_size = btrfs_item_size(eb, path->slots[0]);
3176
3177 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3178 item_size >= sizeof(*ei) + sizeof(*bi)) {
3179 unsigned long ptr = 0, end;
3180
3181 ei = btrfs_item_ptr(eb, path->slots[0],
3182 struct btrfs_extent_item);
3183 end = (unsigned long)ei + item_size;
3184 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3185 bi = (struct btrfs_tree_block_info *)(ei + 1);
3186 level = btrfs_tree_block_level(eb, bi);
3187 ptr = (unsigned long)(bi + 1);
3188 } else {
3189 level = (int)extent_key->offset;
3190 ptr = (unsigned long)(ei + 1);
3191 }
3192 generation = btrfs_extent_generation(eb, ei);
3193
3194 /*
3195 * We're reading random blocks without knowing their owner ahead
3196 * of time. This is ok most of the time, as all reloc roots and
3197 * fs roots have the same lock type. However normal trees do
3198 * not, and the only way to know ahead of time is to read the
3199 * inline ref offset. We know it's an fs root if
3200 *
3201 * 1. There's more than one ref.
3202 * 2. There's a SHARED_DATA_REF_KEY set.
3203 * 3. FULL_BACKREF is set on the flags.
3204 *
3205 * Otherwise it's safe to assume that the ref offset == the
3206 * owner of this block, so we can use that when calling
3207 * read_tree_block.
3208 */
3209 if (btrfs_extent_refs(eb, ei) == 1 &&
3210 !(btrfs_extent_flags(eb, ei) &
3211 BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3212 ptr < end) {
3213 struct btrfs_extent_inline_ref *iref;
3214 int type;
3215
3216 iref = (struct btrfs_extent_inline_ref *)ptr;
3217 type = btrfs_get_extent_inline_ref_type(eb, iref,
3218 BTRFS_REF_TYPE_BLOCK);
3219 if (type == BTRFS_REF_TYPE_INVALID)
3220 return -EINVAL;
3221 if (type == BTRFS_TREE_BLOCK_REF_KEY)
3222 owner = btrfs_extent_inline_ref_offset(eb, iref);
3223 }
3224 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3225 btrfs_print_v0_err(eb->fs_info);
3226 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3227 return -EINVAL;
3228 } else {
3229 BUG();
3230 }
3231
3232 btrfs_release_path(path);
3233
3234 BUG_ON(level == -1);
3235
3236 block = kmalloc(sizeof(*block), GFP_NOFS);
3237 if (!block)
3238 return -ENOMEM;
3239
3240 block->bytenr = extent_key->objectid;
3241 block->key.objectid = rc->extent_root->fs_info->nodesize;
3242 block->key.offset = generation;
3243 block->level = level;
3244 block->key_ready = 0;
3245 block->owner = owner;
3246
3247 rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3248 if (rb_node)
3249 btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3250 -EEXIST);
3251
3252 return 0;
3253}
3254
3255/*
3256 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3257 */
3258static int __add_tree_block(struct reloc_control *rc,
3259 u64 bytenr, u32 blocksize,
3260 struct rb_root *blocks)
3261{
3262 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3263 struct btrfs_path *path;
3264 struct btrfs_key key;
3265 int ret;
3266 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3267
3268 if (tree_block_processed(bytenr, rc))
3269 return 0;
3270
3271 if (rb_simple_search(blocks, bytenr))
3272 return 0;
3273
3274 path = btrfs_alloc_path();
3275 if (!path)
3276 return -ENOMEM;
3277again:
3278 key.objectid = bytenr;
3279 if (skinny) {
3280 key.type = BTRFS_METADATA_ITEM_KEY;
3281 key.offset = (u64)-1;
3282 } else {
3283 key.type = BTRFS_EXTENT_ITEM_KEY;
3284 key.offset = blocksize;
3285 }
3286
3287 path->search_commit_root = 1;
3288 path->skip_locking = 1;
3289 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3290 if (ret < 0)
3291 goto out;
3292
3293 if (ret > 0 && skinny) {
3294 if (path->slots[0]) {
3295 path->slots[0]--;
3296 btrfs_item_key_to_cpu(path->nodes[0], &key,
3297 path->slots[0]);
3298 if (key.objectid == bytenr &&
3299 (key.type == BTRFS_METADATA_ITEM_KEY ||
3300 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3301 key.offset == blocksize)))
3302 ret = 0;
3303 }
3304
3305 if (ret) {
3306 skinny = false;
3307 btrfs_release_path(path);
3308 goto again;
3309 }
3310 }
3311 if (ret) {
3312 ASSERT(ret == 1);
3313 btrfs_print_leaf(path->nodes[0]);
3314 btrfs_err(fs_info,
3315 "tree block extent item (%llu) is not found in extent tree",
3316 bytenr);
3317 WARN_ON(1);
3318 ret = -EINVAL;
3319 goto out;
3320 }
3321
3322 ret = add_tree_block(rc, &key, path, blocks);
3323out:
3324 btrfs_free_path(path);
3325 return ret;
3326}
3327
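/*
 * Truncate the free space cache for @block_group. If @inode is not passed
 * in, it is looked up by @ino in the tree root.
 */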
3328static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3329 struct btrfs_block_group *block_group,
3330 struct inode *inode,
3331 u64 ino)
3332{
3333 struct btrfs_root *root = fs_info->tree_root;
3334 struct btrfs_trans_handle *trans;
3335 int ret = 0;
3336
3337 if (inode)
3338 goto truncate;
3339
3340 inode = btrfs_iget(fs_info->sb, ino, root);
3341 if (IS_ERR(inode))
3342 return -ENOENT;
3343
3344truncate:
3345 ret = btrfs_check_trunc_cache_free_space(fs_info,
3346 &fs_info->global_block_rsv);
3347 if (ret)
3348 goto out;
3349
3350 trans = btrfs_join_transaction(root);
3351 if (IS_ERR(trans)) {
3352 ret = PTR_ERR(trans);
3353 goto out;
3354 }
3355
3356 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3357
3358 btrfs_end_transaction(trans);
3359 btrfs_btree_balance_dirty(fs_info);
3360out:
3361 iput(inode);
3362 return ret;
3363}
3364
3365/*
3366 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete the
3367 * cache inode, to avoid the free space cache data extent blocking data relocation.
3368 */
3369static int delete_v1_space_cache(struct extent_buffer *leaf,
3370 struct btrfs_block_group *block_group,
3371 u64 data_bytenr)
3372{
3373 u64 space_cache_ino;
3374 struct btrfs_file_extent_item *ei;
3375 struct btrfs_key key;
3376 bool found = false;
3377 int i;
3378 int ret;
3379
3380 if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3381 return 0;
3382
3383 for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3384 u8 type;
3385
3386 btrfs_item_key_to_cpu(leaf, &key, i);
3387 if (key.type != BTRFS_EXTENT_DATA_KEY)
3388 continue;
3389 ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3390 type = btrfs_file_extent_type(leaf, ei);
3391
3392 if ((type == BTRFS_FILE_EXTENT_REG ||
3393 type == BTRFS_FILE_EXTENT_PREALLOC) &&
3394 btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3395 found = true;
3396 space_cache_ino = key.objectid;
3397 break;
3398 }
3399 }
3400 if (!found)
3401 return -ENOENT;
3402 ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3403 space_cache_ino);
3404 return ret;
3405}
3406
3407/*
3408 * helper to find all tree blocks that reference a given data extent
3409 */
3410static noinline_for_stack
3411int add_data_references(struct reloc_control *rc,
3412 struct btrfs_key *extent_key,
3413 struct btrfs_path *path,
3414 struct rb_root *blocks)
3415{
3416 struct btrfs_backref_walk_ctx ctx = { 0 };
3417 struct ulist_iterator leaf_uiter;
3418 struct ulist_node *ref_node = NULL;
3419 const u32 blocksize = rc->extent_root->fs_info->nodesize;
3420 int ret = 0;
3421
3422 btrfs_release_path(path);
3423
3424 ctx.bytenr = extent_key->objectid;
3425 ctx.ignore_extent_item_pos = true;
3426 ctx.fs_info = rc->extent_root->fs_info;
3427
3428 ret = btrfs_find_all_leafs(&ctx);
3429 if (ret < 0)
3430 return ret;
3431
3432 ULIST_ITER_INIT(&leaf_uiter);
3433 while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3434 struct btrfs_tree_parent_check check = { 0 };
3435 struct extent_buffer *eb;
3436
3437 eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3438 if (IS_ERR(eb)) {
3439 ret = PTR_ERR(eb);
3440 break;
3441 }
3442 ret = delete_v1_space_cache(eb, rc->block_group,
3443 extent_key->objectid);
3444 free_extent_buffer(eb);
3445 if (ret < 0)
3446 break;
3447 ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3448 if (ret < 0)
3449 break;
3450 }
3451 if (ret < 0)
3452 free_block_list(blocks);
3453 ulist_free(ctx.refs);
3454 return ret;
3455}
3456
3457/*
3458 * helper to find next unprocessed extent
3459 */
3460static noinline_for_stack
3461int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3462 struct btrfs_key *extent_key)
3463{
3464 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3465 struct btrfs_key key;
3466 struct extent_buffer *leaf;
3467 u64 start, end, last;
3468 int ret;
3469
3470 last = rc->block_group->start + rc->block_group->length;
3471 while (1) {
3472 cond_resched();
3473 if (rc->search_start >= last) {
3474 ret = 1;
3475 break;
3476 }
3477
3478 key.objectid = rc->search_start;
3479 key.type = BTRFS_EXTENT_ITEM_KEY;
3480 key.offset = 0;
3481
3482 path->search_commit_root = 1;
3483 path->skip_locking = 1;
3484 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3485 0, 0);
3486 if (ret < 0)
3487 break;
3488next:
3489 leaf = path->nodes[0];
3490 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3491 ret = btrfs_next_leaf(rc->extent_root, path);
3492 if (ret != 0)
3493 break;
3494 leaf = path->nodes[0];
3495 }
3496
3497 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3498 if (key.objectid >= last) {
3499 ret = 1;
3500 break;
3501 }
3502
3503 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3504 key.type != BTRFS_METADATA_ITEM_KEY) {
3505 path->slots[0]++;
3506 goto next;
3507 }
3508
3509 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3510 key.objectid + key.offset <= rc->search_start) {
3511 path->slots[0]++;
3512 goto next;
3513 }
3514
3515 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3516 key.objectid + fs_info->nodesize <=
3517 rc->search_start) {
3518 path->slots[0]++;
3519 goto next;
3520 }
3521
3522 ret = find_first_extent_bit(&rc->processed_blocks,
3523 key.objectid, &start, &end,
3524 EXTENT_DIRTY, NULL);
3525
3526 if (ret == 0 && start <= key.objectid) {
3527 btrfs_release_path(path);
3528 rc->search_start = end + 1;
3529 } else {
3530 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3531 rc->search_start = key.objectid + key.offset;
3532 else
3533 rc->search_start = key.objectid +
3534 fs_info->nodesize;
3535 memcpy(extent_key, &key, sizeof(key));
3536 return 0;
3537 }
3538 }
3539 btrfs_release_path(path);
3540 return ret;
3541}
3542
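/* Install @rc as the file system's active reloc control. */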
3543static void set_reloc_control(struct reloc_control *rc)
3544{
3545 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3546
3547 mutex_lock(&fs_info->reloc_mutex);
3548 fs_info->reloc_ctl = rc;
3549 mutex_unlock(&fs_info->reloc_mutex);
3550}
3551
3552static void unset_reloc_control(struct reloc_control *rc)
3553{
3554 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3555
3556 mutex_lock(&fs_info->reloc_mutex);
3557 fs_info->reloc_ctl = NULL;
3558 mutex_unlock(&fs_info->reloc_mutex);
3559}
3560
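/*
 * Set up the reloc_control before the main relocation loop: allocate and
 * refill the temporary block reserve, enable reloc tree creation, install
 * the reloc control and commit a transaction.
 */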
3561static noinline_for_stack
3562int prepare_to_relocate(struct reloc_control *rc)
3563{
3564 struct btrfs_trans_handle *trans;
3565 int ret;
3566
3567 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3568 BTRFS_BLOCK_RSV_TEMP);
3569 if (!rc->block_rsv)
3570 return -ENOMEM;
3571
3572 memset(&rc->cluster, 0, sizeof(rc->cluster));
3573 rc->search_start = rc->block_group->start;
3574 rc->extents_found = 0;
3575 rc->nodes_relocated = 0;
3576 rc->merging_rsv_size = 0;
3577 rc->reserved_bytes = 0;
3578 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3579 RELOCATION_RESERVED_NODES;
3580 ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3581 rc->block_rsv, rc->block_rsv->size,
3582 BTRFS_RESERVE_FLUSH_ALL);
3583 if (ret)
3584 return ret;
3585
3586 rc->create_reloc_tree = 1;
3587 set_reloc_control(rc);
3588
3589 trans = btrfs_join_transaction(rc->extent_root);
3590 if (IS_ERR(trans)) {
3591 unset_reloc_control(rc);
3592 /*
3593 * the extent tree is not a ref_cow tree and has no reloc_root to
3594 * clean up. Callers are responsible for freeing the above
3595 * block rsv.
3596 */
3597 return PTR_ERR(trans);
3598 }
3599
3600 ret = btrfs_commit_transaction(trans);
3601 if (ret)
3602 unset_reloc_control(rc);
3603
3604 return ret;
3605}
3606
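/*
 * Main relocation loop for one block group: walk the extent tree within the
 * block group and relocate every tree block found; in the UPDATE_DATA_PTRS
 * stage also relocate the tree blocks referencing data extents, and in the
 * MOVE_DATA_EXTENTS stage cluster and copy the data extents; finally merge
 * the reloc trees back into their fs trees.
 */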
3607static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3608{
3609 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3610 struct rb_root blocks = RB_ROOT;
3611 struct btrfs_key key;
3612 struct btrfs_trans_handle *trans = NULL;
3613 struct btrfs_path *path;
3614 struct btrfs_extent_item *ei;
3615 u64 flags;
3616 int ret;
3617 int err = 0;
3618 int progress = 0;
3619
3620 path = btrfs_alloc_path();
3621 if (!path)
3622 return -ENOMEM;
3623 path->reada = READA_FORWARD;
3624
3625 ret = prepare_to_relocate(rc);
3626 if (ret) {
3627 err = ret;
3628 goto out_free;
3629 }
3630
3631 while (1) {
3632 rc->reserved_bytes = 0;
3633 ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3634 rc->block_rsv->size,
3635 BTRFS_RESERVE_FLUSH_ALL);
3636 if (ret) {
3637 err = ret;
3638 break;
3639 }
3640 progress++;
3641 trans = btrfs_start_transaction(rc->extent_root, 0);
3642 if (IS_ERR(trans)) {
3643 err = PTR_ERR(trans);
3644 trans = NULL;
3645 break;
3646 }
3647restart:
3648 if (update_backref_cache(trans, &rc->backref_cache)) {
3649 btrfs_end_transaction(trans);
3650 trans = NULL;
3651 continue;
3652 }
3653
3654 ret = find_next_extent(rc, path, &key);
3655 if (ret < 0)
3656 err = ret;
3657 if (ret != 0)
3658 break;
3659
3660 rc->extents_found++;
3661
3662 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3663 struct btrfs_extent_item);
3664 flags = btrfs_extent_flags(path->nodes[0], ei);
3665
3666 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3667 ret = add_tree_block(rc, &key, path, &blocks);
3668 } else if (rc->stage == UPDATE_DATA_PTRS &&
3669 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3670 ret = add_data_references(rc, &key, path, &blocks);
3671 } else {
3672 btrfs_release_path(path);
3673 ret = 0;
3674 }
3675 if (ret < 0) {
3676 err = ret;
3677 break;
3678 }
3679
3680 if (!RB_EMPTY_ROOT(&blocks)) {
3681 ret = relocate_tree_blocks(trans, rc, &blocks);
3682 if (ret < 0) {
3683 if (ret != -EAGAIN) {
3684 err = ret;
3685 break;
3686 }
3687 rc->extents_found--;
3688 rc->search_start = key.objectid;
3689 }
3690 }
3691
3692 btrfs_end_transaction_throttle(trans);
3693 btrfs_btree_balance_dirty(fs_info);
3694 trans = NULL;
3695
3696 if (rc->stage == MOVE_DATA_EXTENTS &&
3697 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3698 rc->found_file_extent = 1;
3699 ret = relocate_data_extent(rc->data_inode,
3700 &key, &rc->cluster);
3701 if (ret < 0) {
3702 err = ret;
3703 break;
3704 }
3705 }
3706 if (btrfs_should_cancel_balance(fs_info)) {
3707 err = -ECANCELED;
3708 break;
3709 }
3710 }
3711 if (trans && progress && err == -ENOSPC) {
3712 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3713 if (ret == 1) {
3714 err = 0;
3715 progress = 0;
3716 goto restart;
3717 }
3718 }
3719
3720 btrfs_release_path(path);
3721 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3722
3723 if (trans) {
3724 btrfs_end_transaction_throttle(trans);
3725 btrfs_btree_balance_dirty(fs_info);
3726 }
3727
3728 if (!err) {
3729 ret = relocate_file_extent_cluster(rc->data_inode,
3730 &rc->cluster);
3731 if (ret < 0)
3732 err = ret;
3733 }
3734
3735 rc->create_reloc_tree = 0;
3736 set_reloc_control(rc);
3737
3738 btrfs_backref_release_cache(&rc->backref_cache);
3739 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3740
3741 /*
3742 * Even in the case when the relocation is cancelled, we should still go
3743 * through prepare_to_merge() and merge_reloc_roots().
3744 *
3745 * On error (including a cancelled balance), prepare_to_merge() will
3746 * mark all reloc trees orphan, then queue them for cleanup in
3747 * merge_reloc_roots().
3748 */
3749 err = prepare_to_merge(rc, err);
3750
3751 merge_reloc_roots(rc);
3752
3753 rc->merge_reloc_tree = 0;
3754 unset_reloc_control(rc);
3755 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3756
3757 /* get rid of pinned extents */
3758 trans = btrfs_join_transaction(rc->extent_root);
3759 if (IS_ERR(trans)) {
3760 err = PTR_ERR(trans);
3761 goto out_free;
3762 }
3763 ret = btrfs_commit_transaction(trans);
3764 if (ret && !err)
3765 err = ret;
3766out_free:
3767 ret = clean_dirty_subvols(rc);
3768 if (ret < 0 && !err)
3769 err = ret;
3770 btrfs_free_block_rsv(fs_info, rc->block_rsv);
3771 btrfs_free_path(path);
3772 return err;
3773}
3774
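/*
 * Insert the inode item for the data relocation inode: a plain 0600 regular
 * file with the NOCOMPRESS and PREALLOC flags and a link count of zero.
 */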
3775static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3776 struct btrfs_root *root, u64 objectid)
3777{
3778 struct btrfs_path *path;
3779 struct btrfs_inode_item *item;
3780 struct extent_buffer *leaf;
3781 int ret;
3782
3783 path = btrfs_alloc_path();
3784 if (!path)
3785 return -ENOMEM;
3786
3787 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3788 if (ret)
3789 goto out;
3790
3791 leaf = path->nodes[0];
3792 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3793 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3794 btrfs_set_inode_generation(leaf, item, 1);
3795 btrfs_set_inode_size(leaf, item, 0);
3796 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3797 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3798 BTRFS_INODE_PREALLOC);
3799 btrfs_mark_buffer_dirty(leaf);
3800out:
3801 btrfs_free_path(path);
3802 return ret;
3803}
3804
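/* Delete the inode item inserted by __insert_orphan_inode(). */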
3805static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3806 struct btrfs_root *root, u64 objectid)
3807{
3808 struct btrfs_path *path;
3809 struct btrfs_key key;
3810 int ret = 0;
3811
3812 path = btrfs_alloc_path();
3813 if (!path) {
3814 ret = -ENOMEM;
3815 goto out;
3816 }
3817
3818 key.objectid = objectid;
3819 key.type = BTRFS_INODE_ITEM_KEY;
3820 key.offset = 0;
3821 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3822 if (ret) {
3823 if (ret > 0)
3824 ret = -ENOENT;
3825 goto out;
3826 }
3827 ret = btrfs_del_item(trans, root, path);
3828out:
3829 if (ret)
3830 btrfs_abort_transaction(trans, ret);
3831 btrfs_free_path(path);
3832}
3833
3834 /*
3835  * Helper to create an inode for data relocation. The inode lives in
3836  * the data relocation tree and its link count is 0.
3837  */
3838static noinline_for_stack
3839struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3840 struct btrfs_block_group *group)
3841{
3842 struct inode *inode = NULL;
3843 struct btrfs_trans_handle *trans;
3844 struct btrfs_root *root;
3845 u64 objectid;
3846 int err = 0;
3847
3848 root = btrfs_grab_root(fs_info->data_reloc_root);
3849 trans = btrfs_start_transaction(root, 6);
3850 if (IS_ERR(trans)) {
3851 btrfs_put_root(root);
3852 return ERR_CAST(trans);
3853 }
3854
3855 err = btrfs_get_free_objectid(root, &objectid);
3856 if (err)
3857 goto out;
3858
3859 err = __insert_orphan_inode(trans, root, objectid);
3860 if (err)
3861 goto out;
3862
3863 inode = btrfs_iget(fs_info->sb, objectid, root);
3864 if (IS_ERR(inode)) {
3865 delete_orphan_inode(trans, root, objectid);
3866 err = PTR_ERR(inode);
3867 inode = NULL;
3868 goto out;
3869 }
3870 BTRFS_I(inode)->index_cnt = group->start;
3871
3872 err = btrfs_orphan_add(trans, BTRFS_I(inode));
3873out:
3874 btrfs_put_root(root);
3875 btrfs_end_transaction(trans);
3876 btrfs_btree_balance_dirty(fs_info);
3877 if (err) {
3878 iput(inode);
3879 inode = ERR_PTR(err);
3880 }
3881 return inode;
3882}
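
/*
 * Note that BTRFS_I(inode)->index_cnt is set to the block group's start
 * offset above, so for this inode a file offset maps to a disk bytenr as
 * (file_pos + index_cnt); btrfs_reloc_clone_csums() below relies on that
 * mapping when looking up the old checksums.
 */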
3883
3884/*
3885 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3886 * has been requested meanwhile and don't start in that case.
3887 *
3888 * Return:
3889 * 0 success
3890 * -EINPROGRESS operation is already in progress, that's probably a bug
3891 * -ECANCELED cancellation request was set before the operation started
3892 */
3893static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3894{
3895 if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3896 /* This should not happen */
3897 btrfs_err(fs_info, "reloc already running, cannot start");
3898 return -EINPROGRESS;
3899 }
3900
3901 if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3902 btrfs_info(fs_info, "chunk relocation canceled on start");
3903 /*
3904 * On cancel, clear all requests but let the caller mark
3905 * the end after cleanup operations.
3906 */
3907 atomic_set(&fs_info->reloc_cancel_req, 0);
3908 return -ECANCELED;
3909 }
3910 return 0;
3911}
3912
3913/*
3914 * Mark end of chunk relocation that is cancellable and wake any waiters.
3915 */
3916static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3917{
3918 /* Requested after start, clear bit first so any waiters can continue */
3919 if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3920 btrfs_info(fs_info, "chunk relocation canceled during operation");
3921 clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3922 atomic_set(&fs_info->reloc_cancel_req, 0);
3923}
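
/*
 * reloc_chunk_start() and reloc_chunk_end() must be paired. A minimal
 * sketch of the pairing as used by the callers below (error handling
 * trimmed):
 *
 *	ret = reloc_chunk_start(fs_info);
 *	if (ret < 0) {
 *		err = ret;
 *		goto out;
 *	}
 *	... relocation or recovery work ...
 * out:
 *	reloc_chunk_end(fs_info);
 */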
3924
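/*
 * Allocate and initialize a reloc_control: empty reloc root and dirty
 * subvolume lists, the backref cache, the reloc root mapping tree and the
 * extent io tree that tracks processed tree blocks. Returns NULL on
 * allocation failure.
 */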
3925static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3926{
3927 struct reloc_control *rc;
3928
3929 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3930 if (!rc)
3931 return NULL;
3932
3933 INIT_LIST_HEAD(&rc->reloc_roots);
3934 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3935 btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3936 mapping_tree_init(&rc->reloc_root_tree);
3937 extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3938 return rc;
3939}
3940
3941static void free_reloc_control(struct reloc_control *rc)
3942{
3943 struct mapping_node *node, *tmp;
3944
3945 free_reloc_roots(&rc->reloc_roots);
3946 rbtree_postorder_for_each_entry_safe(node, tmp,
3947 &rc->reloc_root_tree.rb_root, rb_node)
3948 kfree(node);
3949
3950 kfree(rc);
3951}
3952
3953/*
3954 * Print the block group being relocated
3955 */
3956static void describe_relocation(struct btrfs_fs_info *fs_info,
3957 struct btrfs_block_group *block_group)
3958{
3959 char buf[128] = {'\0'};
3960
3961 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3962
3963 btrfs_info(fs_info,
3964 "relocating block group %llu flags %s",
3965 block_group->start, buf);
3966}
3967
3968static const char *stage_to_string(int stage)
3969{
3970 if (stage == MOVE_DATA_EXTENTS)
3971 return "move data extents";
3972 if (stage == UPDATE_DATA_PTRS)
3973 return "update data pointers";
3974 return "unknown";
3975}
3976
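/*
 * Block group relocation runs in two stages, driven by the loop in
 * btrfs_relocate_block_group() below: MOVE_DATA_EXTENTS copies the data of
 * the extents it finds through the relocation data inode so that it lands
 * in new space, then UPDATE_DATA_PTRS updates the metadata that still
 * references the old locations. The loop repeats until a pass finds no
 * more extents in the block group.
 */
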
3977/*
3978 * function to relocate all extents in a block group.
3979 */
3980int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3981{
3982 struct btrfs_block_group *bg;
3983 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3984 struct reloc_control *rc;
3985 struct inode *inode;
3986 struct btrfs_path *path;
3987 int ret;
3988 int rw = 0;
3989 int err = 0;
3990
3991 /*
3992 * This only gets set if we had a half-deleted snapshot on mount. We
3993 * cannot allow relocation to start while we're still trying to clean up
3994 * these pending deletions.
3995 */
3996 ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3997 if (ret)
3998 return ret;
3999
4000 /* We may have been woken up by close_ctree, so bail if we're closing. */
4001 if (btrfs_fs_closing(fs_info))
4002 return -EINTR;
4003
4004 bg = btrfs_lookup_block_group(fs_info, group_start);
4005 if (!bg)
4006 return -ENOENT;
4007
4008	/*
4009	 * Relocation of a data block group creates ordered extents. Without
4010	 * sb_start_write(), the filesystem could be frozen while unfinished
4011	 * ordered extents are still pending. Such ordered extents can cause
4012	 * a deadlock, e.g. when syncfs() is waiting for their completion but
4013	 * they can't finish because they block when joining a transaction,
4014	 * as the freeze locks are being held in write mode.
4015	 */
4016 if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4017 ASSERT(sb_write_started(fs_info->sb));
4018
4019 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4020 btrfs_put_block_group(bg);
4021 return -ETXTBSY;
4022 }
4023
4024 rc = alloc_reloc_control(fs_info);
4025 if (!rc) {
4026 btrfs_put_block_group(bg);
4027 return -ENOMEM;
4028 }
4029
4030 ret = reloc_chunk_start(fs_info);
4031 if (ret < 0) {
4032 err = ret;
4033 goto out_put_bg;
4034 }
4035
4036 rc->extent_root = extent_root;
4037 rc->block_group = bg;
4038
4039 ret = btrfs_inc_block_group_ro(rc->block_group, true);
4040 if (ret) {
4041 err = ret;
4042 goto out;
4043 }
4044 rw = 1;
4045
4046 path = btrfs_alloc_path();
4047 if (!path) {
4048 err = -ENOMEM;
4049 goto out;
4050 }
4051
4052 inode = lookup_free_space_inode(rc->block_group, path);
4053 btrfs_free_path(path);
4054
4055 if (!IS_ERR(inode))
4056 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4057 else
4058 ret = PTR_ERR(inode);
4059
4060 if (ret && ret != -ENOENT) {
4061 err = ret;
4062 goto out;
4063 }
4064
4065 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4066 if (IS_ERR(rc->data_inode)) {
4067 err = PTR_ERR(rc->data_inode);
4068 rc->data_inode = NULL;
4069 goto out;
4070 }
4071
4072 describe_relocation(fs_info, rc->block_group);
4073
4074 btrfs_wait_block_group_reservations(rc->block_group);
4075 btrfs_wait_nocow_writers(rc->block_group);
4076 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4077 rc->block_group->start,
4078 rc->block_group->length);
4079
4080 ret = btrfs_zone_finish(rc->block_group);
4081 WARN_ON(ret && ret != -EAGAIN);
4082
4083 while (1) {
4084 int finishes_stage;
4085
4086 mutex_lock(&fs_info->cleaner_mutex);
4087 ret = relocate_block_group(rc);
4088 mutex_unlock(&fs_info->cleaner_mutex);
4089 if (ret < 0)
4090 err = ret;
4091
4092 finishes_stage = rc->stage;
4093 /*
4094 * We may have gotten ENOSPC after we already dirtied some
4095 * extents. If writeout happens while we're relocating a
4096 * different block group we could end up hitting the
4097 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4098 * btrfs_reloc_cow_block. Make sure we write everything out
4099 * properly so we don't trip over this problem, and then break
4100 * out of the loop if we hit an error.
4101 */
4102 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4103 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4104 (u64)-1);
4105 if (ret)
4106 err = ret;
4107 invalidate_mapping_pages(rc->data_inode->i_mapping,
4108 0, -1);
4109 rc->stage = UPDATE_DATA_PTRS;
4110 }
4111
4112 if (err < 0)
4113 goto out;
4114
4115 if (rc->extents_found == 0)
4116 break;
4117
4118 btrfs_info(fs_info, "found %llu extents, stage: %s",
4119 rc->extents_found, stage_to_string(finishes_stage));
4120 }
4121
4122 WARN_ON(rc->block_group->pinned > 0);
4123 WARN_ON(rc->block_group->reserved > 0);
4124 WARN_ON(rc->block_group->used > 0);
4125out:
4126 if (err && rw)
4127 btrfs_dec_block_group_ro(rc->block_group);
4128 iput(rc->data_inode);
4129out_put_bg:
4130 btrfs_put_block_group(bg);
4131 reloc_chunk_end(fs_info);
4132 free_reloc_control(rc);
4133 return err;
4134}
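
/*
 * For data block groups the ASSERT above expects the caller to already
 * hold a write reference on the superblock. A hedged sketch of the
 * expected calling pattern (chunk_offset is only an illustrative name):
 *
 *	sb_start_write(fs_info->sb);
 *	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
 *	sb_end_write(fs_info->sb);
 */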
4135
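/*
 * Mark a reloc root found during recovery as garbage when its fs root no
 * longer exists: clear the drop progress and set the root's refs to 0 so
 * that it gets queued for deletion instead of being merged.
 */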
4136static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4137{
4138 struct btrfs_fs_info *fs_info = root->fs_info;
4139 struct btrfs_trans_handle *trans;
4140 int ret, err;
4141
4142 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4143 if (IS_ERR(trans))
4144 return PTR_ERR(trans);
4145
4146 memset(&root->root_item.drop_progress, 0,
4147 sizeof(root->root_item.drop_progress));
4148 btrfs_set_root_drop_level(&root->root_item, 0);
4149 btrfs_set_root_refs(&root->root_item, 0);
4150 ret = btrfs_update_root(trans, fs_info->tree_root,
4151 &root->root_key, &root->root_item);
4152
4153 err = btrfs_end_transaction(trans);
4154 if (err)
4155 return err;
4156 return ret;
4157}
4158
4159 /*
4160  * Recover relocation interrupted by a system crash.
4161  *
4162  * This function resumes merging reloc trees with the corresponding fs
4163  * trees, which is important for preserving the sharing of tree blocks.
4164  */
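/*
 * In outline: walk all BTRFS_TREE_RELOC_OBJECTID root items backwards,
 * read each reloc root and mark the ones whose fs root is gone as garbage,
 * then recreate the reloc_control, re-attach every surviving reloc root to
 * its fs root and let merge_reloc_roots() finish the interrupted merge.
 * Finally the orphan inode left in the data relocation tree is cleaned up.
 */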
4165int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4166{
4167 LIST_HEAD(reloc_roots);
4168 struct btrfs_key key;
4169 struct btrfs_root *fs_root;
4170 struct btrfs_root *reloc_root;
4171 struct btrfs_path *path;
4172 struct extent_buffer *leaf;
4173 struct reloc_control *rc = NULL;
4174 struct btrfs_trans_handle *trans;
4175 int ret;
4176 int err = 0;
4177
4178 path = btrfs_alloc_path();
4179 if (!path)
4180 return -ENOMEM;
4181 path->reada = READA_BACK;
4182
4183 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4184 key.type = BTRFS_ROOT_ITEM_KEY;
4185 key.offset = (u64)-1;
4186
4187 while (1) {
4188 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4189 path, 0, 0);
4190 if (ret < 0) {
4191 err = ret;
4192 goto out;
4193 }
4194 if (ret > 0) {
4195 if (path->slots[0] == 0)
4196 break;
4197 path->slots[0]--;
4198 }
4199 leaf = path->nodes[0];
4200 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4201 btrfs_release_path(path);
4202
4203 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4204 key.type != BTRFS_ROOT_ITEM_KEY)
4205 break;
4206
4207 reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4208 if (IS_ERR(reloc_root)) {
4209 err = PTR_ERR(reloc_root);
4210 goto out;
4211 }
4212
4213 set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4214 list_add(&reloc_root->root_list, &reloc_roots);
4215
4216 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4217 fs_root = btrfs_get_fs_root(fs_info,
4218 reloc_root->root_key.offset, false);
4219 if (IS_ERR(fs_root)) {
4220 ret = PTR_ERR(fs_root);
4221 if (ret != -ENOENT) {
4222 err = ret;
4223 goto out;
4224 }
4225 ret = mark_garbage_root(reloc_root);
4226 if (ret < 0) {
4227 err = ret;
4228 goto out;
4229 }
4230 } else {
4231 btrfs_put_root(fs_root);
4232 }
4233 }
4234
4235 if (key.offset == 0)
4236 break;
4237
4238 key.offset--;
4239 }
4240 btrfs_release_path(path);
4241
4242 if (list_empty(&reloc_roots))
4243 goto out;
4244
4245 rc = alloc_reloc_control(fs_info);
4246 if (!rc) {
4247 err = -ENOMEM;
4248 goto out;
4249 }
4250
4251 ret = reloc_chunk_start(fs_info);
4252 if (ret < 0) {
4253 err = ret;
4254 goto out_end;
4255 }
4256
4257 rc->extent_root = btrfs_extent_root(fs_info, 0);
4258
4259 set_reloc_control(rc);
4260
4261 trans = btrfs_join_transaction(rc->extent_root);
4262 if (IS_ERR(trans)) {
4263 err = PTR_ERR(trans);
4264 goto out_unset;
4265 }
4266
4267 rc->merge_reloc_tree = 1;
4268
4269 while (!list_empty(&reloc_roots)) {
4270 reloc_root = list_entry(reloc_roots.next,
4271 struct btrfs_root, root_list);
4272 list_del(&reloc_root->root_list);
4273
4274 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4275 list_add_tail(&reloc_root->root_list,
4276 &rc->reloc_roots);
4277 continue;
4278 }
4279
4280 fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4281 false);
4282 if (IS_ERR(fs_root)) {
4283 err = PTR_ERR(fs_root);
4284 list_add_tail(&reloc_root->root_list, &reloc_roots);
4285 btrfs_end_transaction(trans);
4286 goto out_unset;
4287 }
4288
4289 err = __add_reloc_root(reloc_root);
4290 ASSERT(err != -EEXIST);
4291 if (err) {
4292 list_add_tail(&reloc_root->root_list, &reloc_roots);
4293 btrfs_put_root(fs_root);
4294 btrfs_end_transaction(trans);
4295 goto out_unset;
4296 }
4297 fs_root->reloc_root = btrfs_grab_root(reloc_root);
4298 btrfs_put_root(fs_root);
4299 }
4300
4301 err = btrfs_commit_transaction(trans);
4302 if (err)
4303 goto out_unset;
4304
4305 merge_reloc_roots(rc);
4306
4307 unset_reloc_control(rc);
4308
4309 trans = btrfs_join_transaction(rc->extent_root);
4310 if (IS_ERR(trans)) {
4311 err = PTR_ERR(trans);
4312 goto out_clean;
4313 }
4314 err = btrfs_commit_transaction(trans);
4315out_clean:
4316 ret = clean_dirty_subvols(rc);
4317 if (ret < 0 && !err)
4318 err = ret;
4319out_unset:
4320 unset_reloc_control(rc);
4321out_end:
4322 reloc_chunk_end(fs_info);
4323 free_reloc_control(rc);
4324out:
4325 free_reloc_roots(&reloc_roots);
4326
4327 btrfs_free_path(path);
4328
4329 if (err == 0) {
4330 /* cleanup orphan inode in data relocation tree */
4331 fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4332 ASSERT(fs_root);
4333 err = btrfs_orphan_cleanup(fs_root);
4334 btrfs_put_root(fs_root);
4335 }
4336 return err;
4337}
4338
4339 /*
4340  * Helper to add ordered checksums for data relocation.
4341  *
4342  * Cloning the existing checksums properly handles nodatasum extents
4343  * and also avoids the CPU cost of recalculating them.
4344  */
4345int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4346{
4347 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4348 struct btrfs_root *csum_root;
4349 struct btrfs_ordered_sum *sums;
4350 struct btrfs_ordered_extent *ordered;
4351 int ret;
4352 u64 disk_bytenr;
4353 u64 new_bytenr;
4354 LIST_HEAD(list);
4355
4356 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4357 BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4358
4359 disk_bytenr = file_pos + inode->index_cnt;
4360 csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4361 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4362 disk_bytenr + len - 1, &list, 0, false);
4363 if (ret)
4364 goto out;
4365
4366 while (!list_empty(&list)) {
4367 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4368 list_del_init(&sums->list);
4369
4370 /*
4371 * We need to offset the new_bytenr based on where the csum is.
4372 * We need to do this because we will read in entire prealloc
4373 * extents but we may have written to say the middle of the
4374 * prealloc extent, so we need to make sure the csum goes with
4375 * the right disk offset.
4376 *
4377 * We can do this because the data reloc inode refers strictly
4378 * to the on disk bytes, so we don't have to worry about
4379 * disk_len vs real len like with real inodes since it's all
4380 * disk length.
4381 */
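/*
 * Illustrative numbers only: if the old location of this range is
 * disk_bytenr 0x100000 (file_pos + index_cnt) and this csum item starts
 * at sums->bytenr 0x102000, the csum sits 0x2000 bytes into the range
 * and must end up at ordered->disk_bytenr + 0x2000 in the new location.
 */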
4382 new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4383 sums->bytenr = new_bytenr;
4384
4385 btrfs_add_ordered_sum(ordered, sums);
4386 }
4387out:
4388 btrfs_put_ordered_extent(ordered);
4389 return ret;
4390}
4391
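/*
 * Hook called when a tree block is COWed while relocation is active. It
 * keeps the backref cache node for reloc tree blocks pointing at the new
 * copy of the block and, for leaves COWed for the first time during the
 * UPDATE_DATA_PTRS stage, rewrites their file extent pointers via
 * replace_file_extents().
 */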
4392int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4393 struct btrfs_root *root, struct extent_buffer *buf,
4394 struct extent_buffer *cow)
4395{
4396 struct btrfs_fs_info *fs_info = root->fs_info;
4397 struct reloc_control *rc;
4398 struct btrfs_backref_node *node;
4399 int first_cow = 0;
4400 int level;
4401 int ret = 0;
4402
4403 rc = fs_info->reloc_ctl;
4404 if (!rc)
4405 return 0;
4406
4407 BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4408
4409 level = btrfs_header_level(buf);
4410 if (btrfs_header_generation(buf) <=
4411 btrfs_root_last_snapshot(&root->root_item))
4412 first_cow = 1;
4413
4414 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4415 rc->create_reloc_tree) {
4416 WARN_ON(!first_cow && level == 0);
4417
4418 node = rc->backref_cache.path[level];
4419 BUG_ON(node->bytenr != buf->start &&
4420 node->new_bytenr != buf->start);
4421
4422 btrfs_backref_drop_node_buffer(node);
4423 atomic_inc(&cow->refs);
4424 node->eb = cow;
4425 node->new_bytenr = cow->start;
4426
4427 if (!node->pending) {
4428 list_move_tail(&node->list,
4429 &rc->backref_cache.pending[level]);
4430 node->pending = 1;
4431 }
4432
4433 if (first_cow)
4434 mark_block_processed(rc, node);
4435
4436 if (first_cow && level > 0)
4437 rc->nodes_relocated += buf->len;
4438 }
4439
4440 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4441 ret = replace_file_extents(trans, rc, root, cow);
4442 return ret;
4443}
4444
4445 /*
4446  * Called before creating a snapshot. It calculates the metadata
4447  * reservation required for relocating tree blocks in the snapshot.
4448  */
4449void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4450 u64 *bytes_to_reserve)
4451{
4452 struct btrfs_root *root = pending->root;
4453 struct reloc_control *rc = root->fs_info->reloc_ctl;
4454
4455 if (!rc || !have_reloc_root(root))
4456 return;
4457
4458 if (!rc->merge_reloc_tree)
4459 return;
4460
4461 root = root->reloc_root;
4462 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4463	/*
4464	 * Relocation is in the stage of merging trees.
4465	 *
4466	 * In the worst case the space used by merging a reloc tree is twice
4467	 * the size of the relocated tree nodes: half for COWing the reloc
4468	 * tree and half for COWing the fs tree. The space used by COWing
4469	 * the reloc tree is freed once that tree is dropped. However, if we
4470	 * create a snapshot, COWing the fs tree may use more space than it
4471	 * frees, so we need to reserve extra space.
4472	 */
4473 *bytes_to_reserve += rc->nodes_relocated;
4474}
4475
4476 /*
4477  * Called after a snapshot is created: migrate the block reservation
4478  * and create a reloc root for the newly created snapshot.
4479  *
4480  * This is similar to btrfs_init_reloc_root(); we come out of here with
4481  * two references held on the reloc_root, one for root->reloc_root and
4482  * one for rc->reloc_roots.
4483  */
4484int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4485 struct btrfs_pending_snapshot *pending)
4486{
4487 struct btrfs_root *root = pending->root;
4488 struct btrfs_root *reloc_root;
4489 struct btrfs_root *new_root;
4490 struct reloc_control *rc = root->fs_info->reloc_ctl;
4491 int ret;
4492
4493 if (!rc || !have_reloc_root(root))
4494 return 0;
4495
4497 rc->merging_rsv_size += rc->nodes_relocated;
4498
4499 if (rc->merge_reloc_tree) {
4500 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4501 rc->block_rsv,
4502 rc->nodes_relocated, true);
4503 if (ret)
4504 return ret;
4505 }
4506
4507 new_root = pending->snap;
4508 reloc_root = create_reloc_root(trans, root->reloc_root,
4509 new_root->root_key.objectid);
4510 if (IS_ERR(reloc_root))
4511 return PTR_ERR(reloc_root);
4512
4513 ret = __add_reloc_root(reloc_root);
4514 ASSERT(ret != -EEXIST);
4515 if (ret) {
4516 /* Pairs with create_reloc_root */
4517 btrfs_put_root(reloc_root);
4518 return ret;
4519 }
4520 new_root->reloc_root = btrfs_grab_root(reloc_root);
4521
4522 if (rc->create_reloc_tree)
4523 ret = clone_backref_node(trans, rc, root, reloc_root);
4524 return ret;
4525}