v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/pagemap.h>
   8#include <linux/writeback.h>
   9#include <linux/blkdev.h>
  10#include <linux/rbtree.h>
  11#include <linux/slab.h>
  12#include <linux/error-injection.h>
  13#include "ctree.h"
  14#include "disk-io.h"
  15#include "transaction.h"
  16#include "volumes.h"
  17#include "locking.h"
  18#include "btrfs_inode.h"
  19#include "async-thread.h"
  20#include "free-space-cache.h"
  21#include "qgroup.h"
  22#include "print-tree.h"
  23#include "delalloc-space.h"
  24#include "block-group.h"
  25#include "backref.h"
  26#include "misc.h"
  27#include "subpage.h"
  28#include "zoned.h"
  29#include "inode-item.h"
  30#include "space-info.h"
  31#include "fs.h"
  32#include "accessors.h"
  33#include "extent-tree.h"
  34#include "root-tree.h"
  35#include "file-item.h"
  36#include "relocation.h"
  37#include "super.h"
  38#include "tree-checker.h"
  39
  40/*
  41 * Relocation overview
  42 *
  43 * [What does relocation do]
  44 *
  45 * The objective of relocation is to relocate all extents of the target block
  46 * group to other block groups.
  47 * This is used by resize (shrink only), profile conversion, space
  48 * compaction, and the balance routine to spread chunks over devices.
  49 *
  50 * 		Before		|		After
  51 * ------------------------------------------------------------------
  52 *  BG A: 10 data extents	| BG A: deleted
  53 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
  54 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
  55 *
  56 * [How does relocation work]
  57 *
  58 * 1.   Mark the target block group read-only
  59 *      New extents won't be allocated from the target block group.
  60 *
  61 * 2.1  Record each extent in the target block group
  62 *      To build a proper map of extents to be relocated.
  63 *
  64 * 2.2  Build data reloc tree and reloc trees
  65 *      Data reloc tree will contain an inode, recording all newly relocated
  66 *      data extents.
  67 *      There will be only one data reloc tree for one data block group.
  68 *
  69 *      Reloc tree will be a special snapshot of its source tree, containing
  70 *      relocated tree blocks.
  71 *      Each tree referring to a tree block in the target block group will
  72 *      get its reloc tree built.
  73 *
  74 * 2.3  Swap source tree with its corresponding reloc tree
  75 *      Each involved tree only refers to new extents after swap.
  76 *
  77 * 3.   Cleanup reloc trees and data reloc tree.
  78 *      As old extents in the target block group are still referenced by reloc
  79 *      trees, we need to clean them up before really freeing the target block
  80 *      group.
  81 *
  82 * The main complexity is in steps 2.2 and 2.3.
  83 *
  84 * The entry point of relocation is relocate_block_group() function.
  85 */
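/*
 * Illustrative user-space example (an editor's sketch, assuming the
 * <linux/btrfs.h> UAPI as of v6.2): one common way relocation is triggered is
 * the balance ioctl with a vrange filter, which asks the kernel to relocate
 * the data chunks whose logical addresses overlap [start, start + len); each
 * matching block group is then processed by relocate_block_group().  Error
 * handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	static int relocate_logical_range(const char *mnt, __u64 start, __u64 len)
 *	{
 *		struct btrfs_ioctl_balance_args args;
 *		int fd, ret;
 *
 *		fd = open(mnt, O_RDONLY | O_DIRECTORY);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&args, 0, sizeof(args));
 *		args.flags = BTRFS_BALANCE_DATA;
 *		args.data.flags = BTRFS_BALANCE_ARGS_VRANGE;
 *		args.data.vstart = start;
 *		args.data.vend = start + len;
 *
 *		ret = ioctl(fd, BTRFS_IOC_BALANCE_V2, &args);
 *		close(fd);
 *		return ret;
 *	}
 */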
  86
  87#define RELOCATION_RESERVED_NODES	256
  88/*
  89 * map address of tree root to tree
  90 */
  91struct mapping_node {
  92	struct {
  93		struct rb_node rb_node;
  94		u64 bytenr;
  95	}; /* Use rb_simple_node for search/insert */
  96	void *data;
  97};
  98
  99struct mapping_tree {
 100	struct rb_root rb_root;
 101	spinlock_t lock;
 102};
 103
 104/*
 105 * represent a tree block to process
 106 */
 107struct tree_block {
 108	struct {
 109		struct rb_node rb_node;
 110		u64 bytenr;
 111	}; /* Use rb_simple_node for search/insert */
 112	u64 owner;
 113	struct btrfs_key key;
 114	unsigned int level:8;
 115	unsigned int key_ready:1;
 116};
 117
 118#define MAX_EXTENTS 128
 119
 120struct file_extent_cluster {
 121	u64 start;
 122	u64 end;
 123	u64 boundary[MAX_EXTENTS];
 124	unsigned int nr;
 125};
 126
 127struct reloc_control {
 128	/* block group to relocate */
 129	struct btrfs_block_group *block_group;
 130	/* extent tree */
 131	struct btrfs_root *extent_root;
 132	/* inode for moving data */
 133	struct inode *data_inode;
 134
 135	struct btrfs_block_rsv *block_rsv;
 136
 137	struct btrfs_backref_cache backref_cache;
 138
 139	struct file_extent_cluster cluster;
 140	/* tree blocks have been processed */
 141	struct extent_io_tree processed_blocks;
 142	/* map start of tree root to corresponding reloc tree */
 143	struct mapping_tree reloc_root_tree;
 144	/* list of reloc trees */
 145	struct list_head reloc_roots;
 146	/* list of subvolume trees that get relocated */
 147	struct list_head dirty_subvol_roots;
 148	/* size of metadata reservation for merging reloc trees */
 149	u64 merging_rsv_size;
 150	/* size of relocated tree nodes */
 151	u64 nodes_relocated;
 152	/* reserved size for block group relocation */
 153	u64 reserved_bytes;
 154
 155	u64 search_start;
 156	u64 extents_found;
 157
 158	unsigned int stage:8;
 159	unsigned int create_reloc_tree:1;
 160	unsigned int merge_reloc_tree:1;
 161	unsigned int found_file_extent:1;
 162};
 163
 164/* stages of data relocation */
 165#define MOVE_DATA_EXTENTS	0
 166#define UPDATE_DATA_PTRS	1
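/*
 * In the MOVE_DATA_EXTENTS stage the data of the extents being relocated is
 * copied into the data reloc inode; in the UPDATE_DATA_PTRS stage the file
 * extent items in the affected trees are rewritten to point at the new
 * locations (see replace_file_extents() below, which only does work in the
 * UPDATE_DATA_PTRS stage).
 */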
 167
 168static void mark_block_processed(struct reloc_control *rc,
 169				 struct btrfs_backref_node *node)
 170{
 171	u32 blocksize;
 172
 173	if (node->level == 0 ||
 174	    in_range(node->bytenr, rc->block_group->start,
 175		     rc->block_group->length)) {
 176		blocksize = rc->extent_root->fs_info->nodesize;
 177		set_extent_bits(&rc->processed_blocks, node->bytenr,
 178				node->bytenr + blocksize - 1, EXTENT_DIRTY);
 179	}
 180	node->processed = 1;
 181}
 182
 183
 184static void mapping_tree_init(struct mapping_tree *tree)
 185{
 186	tree->rb_root = RB_ROOT;
 187	spin_lock_init(&tree->lock);
 188}
 189
 190/*
 191 * walk up backref nodes until we reach the node that represents the tree root
 192 */
 193static struct btrfs_backref_node *walk_up_backref(
 194		struct btrfs_backref_node *node,
 195		struct btrfs_backref_edge *edges[], int *index)
 196{
 197	struct btrfs_backref_edge *edge;
 198	int idx = *index;
 199
 200	while (!list_empty(&node->upper)) {
 201		edge = list_entry(node->upper.next,
 202				  struct btrfs_backref_edge, list[LOWER]);
 203		edges[idx++] = edge;
 204		node = edge->node[UPPER];
 205	}
 206	BUG_ON(node->detached);
 207	*index = idx;
 208	return node;
 209}
 210
 211/*
 212 * walk down backref nodes to find start of next reference path
 213 */
 214static struct btrfs_backref_node *walk_down_backref(
 215		struct btrfs_backref_edge *edges[], int *index)
 216{
 217	struct btrfs_backref_edge *edge;
 218	struct btrfs_backref_node *lower;
 219	int idx = *index;
 220
 221	while (idx > 0) {
 222		edge = edges[idx - 1];
 223		lower = edge->node[LOWER];
 224		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
 225			idx--;
 226			continue;
 227		}
 228		edge = list_entry(edge->list[LOWER].next,
 229				  struct btrfs_backref_edge, list[LOWER]);
 230		edges[idx - 1] = edge;
 231		*index = idx;
 232		return edge->node[UPPER];
 233	}
 234	*index = 0;
 235	return NULL;
 236}
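/*
 * Usage sketch (illustrative only, not called anywhere in this file): the two
 * helpers above are meant to be used together, alternating walk_up_backref()
 * and walk_down_backref() so that the tree-root node of every reference path
 * is visited exactly once.  This mirrors the pattern used by later functions
 * in this file such as select_one_root(); the per-root work is left as a
 * placeholder comment and the function name is made up for the example.
 */
static void __maybe_unused visit_all_tree_roots(struct btrfs_backref_node *node)
{
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_backref_node *next = node;
	int index = 0;

	while (1) {
		cond_resched();
		/* Climb along the first untaken edge up to a tree root. */
		next = walk_up_backref(next, edges, &index);

		/* ... per-root work on 'next' would go here ... */

		/* Step down to the start of the next reference path. */
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
}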
 237
 238static void update_backref_node(struct btrfs_backref_cache *cache,
 239				struct btrfs_backref_node *node, u64 bytenr)
 240{
 241	struct rb_node *rb_node;
 242	rb_erase(&node->rb_node, &cache->rb_root);
 243	node->bytenr = bytenr;
 244	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 245	if (rb_node)
 246		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
 247}
 248
 249/*
 250 * update backref cache after a transaction commit
 251 */
 252static int update_backref_cache(struct btrfs_trans_handle *trans,
 253				struct btrfs_backref_cache *cache)
 254{
 255	struct btrfs_backref_node *node;
 256	int level = 0;
 257
 258	if (cache->last_trans == 0) {
 259		cache->last_trans = trans->transid;
 260		return 0;
 261	}
 262
 263	if (cache->last_trans == trans->transid)
 264		return 0;
 265
 266	/*
 267	 * Detached nodes are used to avoid unnecessary backref
 268	 * lookups. A transaction commit changes the extent tree,
 269	 * so the detached nodes are no longer useful.
 270	 */
 271	while (!list_empty(&cache->detached)) {
 272		node = list_entry(cache->detached.next,
 273				  struct btrfs_backref_node, list);
 274		btrfs_backref_cleanup_node(cache, node);
 275	}
 276
 277	while (!list_empty(&cache->changed)) {
 278		node = list_entry(cache->changed.next,
 279				  struct btrfs_backref_node, list);
 280		list_del_init(&node->list);
 281		BUG_ON(node->pending);
 282		update_backref_node(cache, node, node->new_bytenr);
 283	}
 284
 285	/*
 286	 * Some nodes can be left in the pending list if there were
 287	 * errors while processing the pending nodes.
 288	 */
 289	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
 290		list_for_each_entry(node, &cache->pending[level], list) {
 291			BUG_ON(!node->pending);
 292			if (node->bytenr == node->new_bytenr)
 293				continue;
 294			update_backref_node(cache, node, node->new_bytenr);
 295		}
 296	}
 297
 298	cache->last_trans = 0;
 299	return 1;
 300}
 301
 302static bool reloc_root_is_dead(struct btrfs_root *root)
 303{
 304	/*
 305	 * Pairs with set_bit/clear_bit in clean_dirty_subvols and
 306	 * btrfs_update_reloc_root. We need to see the updated bit before
 307	 * trying to access reloc_root.
 308	 */
 309	smp_rmb();
 310	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
 311		return true;
 312	return false;
 313}
 314
 315/*
 316 * Check if this subvolume tree has a valid reloc tree.
 317 *
 318 * A reloc tree after swap is considered dead, thus not considered as valid.
 319 * This is enough for most callers, as they don't distinguish dead reloc root
 320 * from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
 321 * special case.
 322 */
 323static bool have_reloc_root(struct btrfs_root *root)
 324{
 325	if (reloc_root_is_dead(root))
 326		return false;
 327	if (!root->reloc_root)
 328		return false;
 329	return true;
 330}
 331
 332int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
 333{
 334	struct btrfs_root *reloc_root;
 335
 336	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 337		return 0;
 338
 339	/* This root has been merged with its reloc tree, we can ignore it */
 340	if (reloc_root_is_dead(root))
 341		return 1;
 342
 343	reloc_root = root->reloc_root;
 344	if (!reloc_root)
 345		return 0;
 346
 347	if (btrfs_header_generation(reloc_root->commit_root) ==
 348	    root->fs_info->running_transaction->transid)
 349		return 0;
 350	/*
 351	 * If there is a reloc tree and it was created in a previous
 352	 * transaction, backref lookup can find the reloc tree, so the
 353	 * backref node for the fs tree root is useless for
 354	 * relocation.
 355	 */
 356	return 1;
 357}
 358
 359/*
 360 * find reloc tree by address of tree root
 361 */
 362struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 363{
 364	struct reloc_control *rc = fs_info->reloc_ctl;
 365	struct rb_node *rb_node;
 366	struct mapping_node *node;
 367	struct btrfs_root *root = NULL;
 368
 369	ASSERT(rc);
 370	spin_lock(&rc->reloc_root_tree.lock);
 371	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
 372	if (rb_node) {
 373		node = rb_entry(rb_node, struct mapping_node, rb_node);
 374		root = node->data;
 375	}
 376	spin_unlock(&rc->reloc_root_tree.lock);
 377	return btrfs_grab_root(root);
 378}
 379
 380/*
 381 * For useless nodes, do two major cleanups:
 382 *
 383 * - Cleanup the children edges and nodes
 384 *   If a child node is also an orphan (no parent) during cleanup, then the
 385 *   child node will also be cleaned up.
 386 *
 387 * - Freeing up leaves (level 0), keeping non-leaf nodes detached
 388 *   For non-leaf nodes, the node is still cached as "detached"
 389 *
 390 * Return false if @node is not in the @useless_nodes list.
 391 * Return true if @node is in the @useless_nodes list.
 392 */
 393static bool handle_useless_nodes(struct reloc_control *rc,
 394				 struct btrfs_backref_node *node)
 395{
 396	struct btrfs_backref_cache *cache = &rc->backref_cache;
 397	struct list_head *useless_node = &cache->useless_node;
 398	bool ret = false;
 399
 400	while (!list_empty(useless_node)) {
 401		struct btrfs_backref_node *cur;
 402
 403		cur = list_first_entry(useless_node, struct btrfs_backref_node,
 404				 list);
 405		list_del_init(&cur->list);
 406
 407		/* Only tree root nodes can be added to @useless_nodes */
 408		ASSERT(list_empty(&cur->upper));
 409
 410		if (cur == node)
 411			ret = true;
 412
 413		/* The node is the lowest node */
 414		if (cur->lowest) {
 415			list_del_init(&cur->lower);
 416			cur->lowest = 0;
 417		}
 418
 419		/* Cleanup the lower edges */
 420		while (!list_empty(&cur->lower)) {
 421			struct btrfs_backref_edge *edge;
 422			struct btrfs_backref_node *lower;
 423
 424			edge = list_entry(cur->lower.next,
 425					struct btrfs_backref_edge, list[UPPER]);
 426			list_del(&edge->list[UPPER]);
 427			list_del(&edge->list[LOWER]);
 428			lower = edge->node[LOWER];
 429			btrfs_backref_free_edge(cache, edge);
 430
 431			/* Child node is also orphan, queue for cleanup */
 432			if (list_empty(&lower->upper))
 433				list_add(&lower->list, useless_node);
 434		}
 435		/* Mark this block processed for relocation */
 436		mark_block_processed(rc, cur);
 437
 438		/*
 439		 * Backref nodes for tree leaves are deleted from the cache.
 440		 * Backref nodes for upper level tree blocks are left in the
 441		 * cache to avoid unnecessary backref lookup.
 442		 */
 443		if (cur->level > 0) {
 444			list_add(&cur->list, &cache->detached);
 445			cur->detached = 1;
 446		} else {
 447			rb_erase(&cur->rb_node, &cache->rb_root);
 448			btrfs_backref_free_node(cache, cur);
 449		}
 450	}
 451	return ret;
 452}
 453
 454/*
 455 * Build backref tree for a given tree block. The root of the backref tree
 456 * corresponds to the tree block, and the leaves of the backref tree
 457 * correspond to the roots of b-trees that reference the tree block.
 458 *
 459 * The basic idea of this function is to check backrefs of a given block to
 460 * find upper level blocks that reference the block, and then check backrefs
 461 * of these upper level blocks recursively. The recursion stops when a tree
 462 * root is reached or backrefs for the block are cached.
 463 *
 464 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 465 * all upper level blocks that directly/indirectly reference the block are also
 466 * cached.
 467 */
 468static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
 469			struct reloc_control *rc, struct btrfs_key *node_key,
 470			int level, u64 bytenr)
 471{
 472	struct btrfs_backref_iter *iter;
 473	struct btrfs_backref_cache *cache = &rc->backref_cache;
 474	/* For searching parent of TREE_BLOCK_REF */
 475	struct btrfs_path *path;
 476	struct btrfs_backref_node *cur;
 477	struct btrfs_backref_node *node = NULL;
 478	struct btrfs_backref_edge *edge;
 479	int ret;
 480	int err = 0;
 481
 482	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
 483	if (!iter)
 484		return ERR_PTR(-ENOMEM);
 485	path = btrfs_alloc_path();
 486	if (!path) {
 487		err = -ENOMEM;
 488		goto out;
 489	}
 490
 491	node = btrfs_backref_alloc_node(cache, bytenr, level);
 492	if (!node) {
 493		err = -ENOMEM;
 494		goto out;
 495	}
 496
 497	node->lowest = 1;
 498	cur = node;
 499
 500	/* Breadth-first search to build backref cache */
 501	do {
 502		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
 503						  cur);
 504		if (ret < 0) {
 505			err = ret;
 506			goto out;
 507		}
 508		edge = list_first_entry_or_null(&cache->pending_edge,
 509				struct btrfs_backref_edge, list[UPPER]);
 510		/*
 511		 * The pending list isn't empty, take the first block to
 512		 * process
 513		 */
 514		if (edge) {
 515			list_del_init(&edge->list[UPPER]);
 516			cur = edge->node[UPPER];
 517		}
 518	} while (edge);
 519
 520	/* Finish the upper linkage of newly added edges/nodes */
 521	ret = btrfs_backref_finish_upper_links(cache, node);
 522	if (ret < 0) {
 523		err = ret;
 524		goto out;
 525	}
 526
 527	if (handle_useless_nodes(rc, node))
 528		node = NULL;
 529out:
 530	btrfs_backref_iter_free(iter);
 531	btrfs_free_path(path);
 532	if (err) {
 533		btrfs_backref_error_cleanup(cache, node);
 534		return ERR_PTR(err);
 535	}
 536	ASSERT(!node || !node->detached);
 537	ASSERT(list_empty(&cache->useless_node) &&
 538	       list_empty(&cache->pending_edge));
 539	return node;
 540}
 541
 542/*
 543 * helper to add a backref node for the newly created snapshot.
 544 * The backref node is created by cloning the backref node that
 545 * corresponds to the root of the source tree.
 546 */
 547static int clone_backref_node(struct btrfs_trans_handle *trans,
 548			      struct reloc_control *rc,
 549			      struct btrfs_root *src,
 550			      struct btrfs_root *dest)
 551{
 552	struct btrfs_root *reloc_root = src->reloc_root;
 553	struct btrfs_backref_cache *cache = &rc->backref_cache;
 554	struct btrfs_backref_node *node = NULL;
 555	struct btrfs_backref_node *new_node;
 556	struct btrfs_backref_edge *edge;
 557	struct btrfs_backref_edge *new_edge;
 558	struct rb_node *rb_node;
 559
 560	if (cache->last_trans > 0)
 561		update_backref_cache(trans, cache);
 562
 563	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
 564	if (rb_node) {
 565		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
 566		if (node->detached)
 567			node = NULL;
 568		else
 569			BUG_ON(node->new_bytenr != reloc_root->node->start);
 570	}
 571
 572	if (!node) {
 573		rb_node = rb_simple_search(&cache->rb_root,
 574					   reloc_root->commit_root->start);
 575		if (rb_node) {
 576			node = rb_entry(rb_node, struct btrfs_backref_node,
 577					rb_node);
 578			BUG_ON(node->detached);
 579		}
 580	}
 581
 582	if (!node)
 583		return 0;
 584
 585	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
 586					    node->level);
 587	if (!new_node)
 588		return -ENOMEM;
 589
 590	new_node->lowest = node->lowest;
 591	new_node->checked = 1;
 592	new_node->root = btrfs_grab_root(dest);
 593	ASSERT(new_node->root);
 594
 595	if (!node->lowest) {
 596		list_for_each_entry(edge, &node->lower, list[UPPER]) {
 597			new_edge = btrfs_backref_alloc_edge(cache);
 598			if (!new_edge)
 599				goto fail;
 600
 601			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
 602						new_node, LINK_UPPER);
 603		}
 604	} else {
 605		list_add_tail(&new_node->lower, &cache->leaves);
 606	}
 607
 608	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
 609				   &new_node->rb_node);
 610	if (rb_node)
 611		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
 612
 613	if (!new_node->lowest) {
 614		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
 615			list_add_tail(&new_edge->list[LOWER],
 616				      &new_edge->node[LOWER]->upper);
 617		}
 618	}
 619	return 0;
 620fail:
 621	while (!list_empty(&new_node->lower)) {
 622		new_edge = list_entry(new_node->lower.next,
 623				      struct btrfs_backref_edge, list[UPPER]);
 624		list_del(&new_edge->list[UPPER]);
 625		btrfs_backref_free_edge(cache, new_edge);
 626	}
 627	btrfs_backref_free_node(cache, new_node);
 628	return -ENOMEM;
 629}
 630
 631/*
 632 * helper to add 'address of tree root -> reloc tree' mapping
 633 */
 634static int __must_check __add_reloc_root(struct btrfs_root *root)
 635{
 636	struct btrfs_fs_info *fs_info = root->fs_info;
 637	struct rb_node *rb_node;
 638	struct mapping_node *node;
 639	struct reloc_control *rc = fs_info->reloc_ctl;
 640
 641	node = kmalloc(sizeof(*node), GFP_NOFS);
 642	if (!node)
 643		return -ENOMEM;
 644
 645	node->bytenr = root->commit_root->start;
 646	node->data = root;
 647
 648	spin_lock(&rc->reloc_root_tree.lock);
 649	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
 650				   node->bytenr, &node->rb_node);
 651	spin_unlock(&rc->reloc_root_tree.lock);
 652	if (rb_node) {
 653		btrfs_err(fs_info,
 654			    "Duplicate root found for start=%llu while inserting into relocation tree",
 655			    node->bytenr);
 656		return -EEXIST;
 657	}
 658
 659	list_add_tail(&root->root_list, &rc->reloc_roots);
 660	return 0;
 661}
 662
 663/*
 664 * helper to delete the 'address of tree root -> reloc tree'
 665 * mapping
 666 */
 667static void __del_reloc_root(struct btrfs_root *root)
 668{
 669	struct btrfs_fs_info *fs_info = root->fs_info;
 670	struct rb_node *rb_node;
 671	struct mapping_node *node = NULL;
 672	struct reloc_control *rc = fs_info->reloc_ctl;
 673	bool put_ref = false;
 674
 675	if (rc && root->node) {
 676		spin_lock(&rc->reloc_root_tree.lock);
 677		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
 678					   root->commit_root->start);
 679		if (rb_node) {
 680			node = rb_entry(rb_node, struct mapping_node, rb_node);
 681			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
 682			RB_CLEAR_NODE(&node->rb_node);
 683		}
 684		spin_unlock(&rc->reloc_root_tree.lock);
 685		ASSERT(!node || (struct btrfs_root *)node->data == root);
 686	}
 687
 688	/*
 689	 * We only put the reloc root here if it's on the list.  There are a lot
 690	 * of places where the pattern is to splice rc->reloc_roots, process
 691	 * the reloc roots, and then add the reloc root back onto
 692	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
 693	 * list we don't want the reference to be dropped, because whoever is
 694	 * manipulating the list is in charge of the reference.
 695	 */
 696	spin_lock(&fs_info->trans_lock);
 697	if (!list_empty(&root->root_list)) {
 698		put_ref = true;
 699		list_del_init(&root->root_list);
 700	}
 701	spin_unlock(&fs_info->trans_lock);
 702	if (put_ref)
 703		btrfs_put_root(root);
 704	kfree(node);
 705}
 706
 707/*
 708 * helper to update the 'address of tree root -> reloc tree'
 709 * mapping
 710 */
 711static int __update_reloc_root(struct btrfs_root *root)
 712{
 713	struct btrfs_fs_info *fs_info = root->fs_info;
 714	struct rb_node *rb_node;
 715	struct mapping_node *node = NULL;
 716	struct reloc_control *rc = fs_info->reloc_ctl;
 717
 718	spin_lock(&rc->reloc_root_tree.lock);
 719	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
 720				   root->commit_root->start);
 721	if (rb_node) {
 722		node = rb_entry(rb_node, struct mapping_node, rb_node);
 723		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
 724	}
 725	spin_unlock(&rc->reloc_root_tree.lock);
 726
 727	if (!node)
 728		return 0;
 729	BUG_ON((struct btrfs_root *)node->data != root);
 730
 731	spin_lock(&rc->reloc_root_tree.lock);
 732	node->bytenr = root->node->start;
 733	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
 734				   node->bytenr, &node->rb_node);
 735	spin_unlock(&rc->reloc_root_tree.lock);
 736	if (rb_node)
 737		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
 738	return 0;
 739}
 740
 741static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 742					struct btrfs_root *root, u64 objectid)
 743{
 744	struct btrfs_fs_info *fs_info = root->fs_info;
 745	struct btrfs_root *reloc_root;
 746	struct extent_buffer *eb;
 747	struct btrfs_root_item *root_item;
 748	struct btrfs_key root_key;
 749	int ret = 0;
 750	bool must_abort = false;
 751
 752	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
 753	if (!root_item)
 754		return ERR_PTR(-ENOMEM);
 755
 756	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
 757	root_key.type = BTRFS_ROOT_ITEM_KEY;
 758	root_key.offset = objectid;
 759
 760	if (root->root_key.objectid == objectid) {
 761		u64 commit_root_gen;
 762
 763		/* called by btrfs_init_reloc_root */
 764		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
 765				      BTRFS_TREE_RELOC_OBJECTID);
 766		if (ret)
 767			goto fail;
 768
 769		/*
 770		 * Set the last_snapshot field to the generation of the commit
 771		 * root - this way ctree.c:btrfs_block_can_be_shared() behaves
 772		 * correctly (returns true) whether the relocation root is
 773		 * created inside the critical section of a transaction commit
 774		 * (through transaction.c:qgroup_account_snapshot()) or created
 775		 * before the transaction commit is started.
 776		 */
 777		commit_root_gen = btrfs_header_generation(root->commit_root);
 778		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
 779	} else {
 780		/*
 781		 * Called by btrfs_reloc_post_snapshot_hook.
 782		 * The source tree is a reloc tree; all tree blocks
 783		 * modified after it was created have the RELOC flag
 784		 * set in their headers, so it's OK to not update
 785		 * 'last_snapshot'.
 786		 */
 787		ret = btrfs_copy_root(trans, root, root->node, &eb,
 788				      BTRFS_TREE_RELOC_OBJECTID);
 789		if (ret)
 790			goto fail;
 791	}
 792
 793	/*
 794	 * We have changed references at this point, we must abort the
 795	 * transaction if anything fails.
 796	 */
 797	must_abort = true;
 798
 799	memcpy(root_item, &root->root_item, sizeof(*root_item));
 800	btrfs_set_root_bytenr(root_item, eb->start);
 801	btrfs_set_root_level(root_item, btrfs_header_level(eb));
 802	btrfs_set_root_generation(root_item, trans->transid);
 803
 804	if (root->root_key.objectid == objectid) {
 805		btrfs_set_root_refs(root_item, 0);
 806		memset(&root_item->drop_progress, 0,
 807		       sizeof(struct btrfs_disk_key));
 808		btrfs_set_root_drop_level(root_item, 0);
 809	}
 810
 811	btrfs_tree_unlock(eb);
 812	free_extent_buffer(eb);
 813
 814	ret = btrfs_insert_root(trans, fs_info->tree_root,
 815				&root_key, root_item);
 816	if (ret)
 817		goto fail;
 818
 819	kfree(root_item);
 820
 821	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
 822	if (IS_ERR(reloc_root)) {
 823		ret = PTR_ERR(reloc_root);
 824		goto abort;
 825	}
 826	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
 827	reloc_root->last_trans = trans->transid;
 828	return reloc_root;
 829fail:
 830	kfree(root_item);
 831abort:
 832	if (must_abort)
 833		btrfs_abort_transaction(trans, ret);
 834	return ERR_PTR(ret);
 835}
 836
 837/*
 838 * Create a reloc tree for a given fs tree. A reloc tree is just a
 839 * snapshot of the fs tree with a special root objectid.
 840 *
 841 * The reloc_root comes out of here with two references, one for
 842 * root->reloc_root, and another for being on the rc->reloc_roots list.
 843 */
 844int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 845			  struct btrfs_root *root)
 846{
 847	struct btrfs_fs_info *fs_info = root->fs_info;
 848	struct btrfs_root *reloc_root;
 849	struct reloc_control *rc = fs_info->reloc_ctl;
 850	struct btrfs_block_rsv *rsv;
 851	int clear_rsv = 0;
 852	int ret;
 853
 854	if (!rc)
 855		return 0;
 856
 857	/*
 858	 * The subvolume has a reloc tree but the swap is finished; no need to
 859	 * create/update the dead reloc tree.
 860	 */
 861	if (reloc_root_is_dead(root))
 862		return 0;
 863
 864	/*
 865	 * This is subtle but important.  We do not do
 866	 * record_root_in_transaction for reloc roots, instead we record their
 867	 * corresponding fs root, and then here we update the last trans for the
 868	 * reloc root.  This means that we have to do this for the entire life
 869	 * of the reloc root, regardless of which stage of the relocation we are
 870	 * in.
 871	 */
 872	if (root->reloc_root) {
 873		reloc_root = root->reloc_root;
 874		reloc_root->last_trans = trans->transid;
 875		return 0;
 876	}
 877
 878	/*
 879	 * We are merging reloc roots; we do not need new reloc trees.  Also
 880	 * reloc trees never need their own reloc tree.
 881	 */
 882	if (!rc->create_reloc_tree ||
 883	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
 884		return 0;
 885
 886	if (!trans->reloc_reserved) {
 887		rsv = trans->block_rsv;
 888		trans->block_rsv = rc->block_rsv;
 889		clear_rsv = 1;
 890	}
 891	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
 892	if (clear_rsv)
 893		trans->block_rsv = rsv;
 894	if (IS_ERR(reloc_root))
 895		return PTR_ERR(reloc_root);
 896
 897	ret = __add_reloc_root(reloc_root);
 898	ASSERT(ret != -EEXIST);
 899	if (ret) {
 900		/* Pairs with create_reloc_root */
 901		btrfs_put_root(reloc_root);
 902		return ret;
 903	}
 904	root->reloc_root = btrfs_grab_root(reloc_root);
 905	return 0;
 906}
 907
 908/*
 909 * update root item of reloc tree
 910 */
 911int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 912			    struct btrfs_root *root)
 913{
 914	struct btrfs_fs_info *fs_info = root->fs_info;
 915	struct btrfs_root *reloc_root;
 916	struct btrfs_root_item *root_item;
 917	int ret;
 918
 919	if (!have_reloc_root(root))
 920		return 0;
 921
 922	reloc_root = root->reloc_root;
 923	root_item = &reloc_root->root_item;
 924
 925	/*
 926	 * We are probably ok here, but __del_reloc_root() will drop its ref of
 927	 * the root.  We have the ref for root->reloc_root, but just in case
 928	 * hold it while we update the reloc root.
 929	 */
 930	btrfs_grab_root(reloc_root);
 931
 932	/* root->reloc_root will stay until the current relocation is finished */
 933	if (fs_info->reloc_ctl->merge_reloc_tree &&
 934	    btrfs_root_refs(root_item) == 0) {
 935		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
 936		/*
 937		 * Mark the tree as dead before we change reloc_root so
 938		 * have_reloc_root will not touch it from now on.
 939		 */
 940		smp_wmb();
 941		__del_reloc_root(reloc_root);
 942	}
 943
 944	if (reloc_root->commit_root != reloc_root->node) {
 945		__update_reloc_root(reloc_root);
 946		btrfs_set_root_node(root_item, reloc_root->node);
 947		free_extent_buffer(reloc_root->commit_root);
 948		reloc_root->commit_root = btrfs_root_node(reloc_root);
 949	}
 950
 951	ret = btrfs_update_root(trans, fs_info->tree_root,
 952				&reloc_root->root_key, root_item);
 953	btrfs_put_root(reloc_root);
 954	return ret;
 955}
 956
 957/*
 958 * helper to find first cached inode with inode number >= objectid
 959 * in a subvolume
 960 */
 961static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
 962{
 963	struct rb_node *node;
 964	struct rb_node *prev;
 965	struct btrfs_inode *entry;
 966	struct inode *inode;
 967
 968	spin_lock(&root->inode_lock);
 969again:
 970	node = root->inode_tree.rb_node;
 971	prev = NULL;
 972	while (node) {
 973		prev = node;
 974		entry = rb_entry(node, struct btrfs_inode, rb_node);
 975
 976		if (objectid < btrfs_ino(entry))
 977			node = node->rb_left;
 978		else if (objectid > btrfs_ino(entry))
 979			node = node->rb_right;
 980		else
 981			break;
 982	}
 983	if (!node) {
 984		while (prev) {
 985			entry = rb_entry(prev, struct btrfs_inode, rb_node);
 986			if (objectid <= btrfs_ino(entry)) {
 987				node = prev;
 988				break;
 989			}
 990			prev = rb_next(prev);
 991		}
 992	}
 993	while (node) {
 994		entry = rb_entry(node, struct btrfs_inode, rb_node);
 995		inode = igrab(&entry->vfs_inode);
 996		if (inode) {
 997			spin_unlock(&root->inode_lock);
 998			return inode;
 999		}
1000
1001		objectid = btrfs_ino(entry) + 1;
1002		if (cond_resched_lock(&root->inode_lock))
1003			goto again;
1004
1005		node = rb_next(node);
1006	}
1007	spin_unlock(&root->inode_lock);
1008	return NULL;
1009}
1010
1011/*
1012 * get new location of data
1013 */
1014static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1015			    u64 bytenr, u64 num_bytes)
1016{
1017	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1018	struct btrfs_path *path;
1019	struct btrfs_file_extent_item *fi;
1020	struct extent_buffer *leaf;
1021	int ret;
1022
1023	path = btrfs_alloc_path();
1024	if (!path)
1025		return -ENOMEM;
1026
1027	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1028	ret = btrfs_lookup_file_extent(NULL, root, path,
1029			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1030	if (ret < 0)
1031		goto out;
1032	if (ret > 0) {
1033		ret = -ENOENT;
1034		goto out;
1035	}
1036
1037	leaf = path->nodes[0];
1038	fi = btrfs_item_ptr(leaf, path->slots[0],
1039			    struct btrfs_file_extent_item);
1040
1041	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1042	       btrfs_file_extent_compression(leaf, fi) ||
1043	       btrfs_file_extent_encryption(leaf, fi) ||
1044	       btrfs_file_extent_other_encoding(leaf, fi));
1045
1046	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1047		ret = -EINVAL;
1048		goto out;
1049	}
1050
1051	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1052	ret = 0;
1053out:
1054	btrfs_free_path(path);
1055	return ret;
1056}
1057
1058/*
1059 * update file extent items in the tree leaf to point to
1060 * the new locations.
1061 */
1062static noinline_for_stack
1063int replace_file_extents(struct btrfs_trans_handle *trans,
1064			 struct reloc_control *rc,
1065			 struct btrfs_root *root,
1066			 struct extent_buffer *leaf)
1067{
1068	struct btrfs_fs_info *fs_info = root->fs_info;
1069	struct btrfs_key key;
1070	struct btrfs_file_extent_item *fi;
1071	struct inode *inode = NULL;
1072	u64 parent;
1073	u64 bytenr;
1074	u64 new_bytenr = 0;
1075	u64 num_bytes;
1076	u64 end;
1077	u32 nritems;
1078	u32 i;
1079	int ret = 0;
1080	int first = 1;
1081	int dirty = 0;
1082
1083	if (rc->stage != UPDATE_DATA_PTRS)
1084		return 0;
1085
1086	/* reloc trees always use full backref */
1087	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1088		parent = leaf->start;
1089	else
1090		parent = 0;
1091
1092	nritems = btrfs_header_nritems(leaf);
1093	for (i = 0; i < nritems; i++) {
1094		struct btrfs_ref ref = { 0 };
1095
1096		cond_resched();
1097		btrfs_item_key_to_cpu(leaf, &key, i);
1098		if (key.type != BTRFS_EXTENT_DATA_KEY)
1099			continue;
1100		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1101		if (btrfs_file_extent_type(leaf, fi) ==
1102		    BTRFS_FILE_EXTENT_INLINE)
1103			continue;
1104		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1105		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1106		if (bytenr == 0)
1107			continue;
1108		if (!in_range(bytenr, rc->block_group->start,
1109			      rc->block_group->length))
1110			continue;
1111
1112		/*
1113		 * if we are modifying a block in the fs tree, wait for read_folio
1114		 * to complete and drop the extent cache
1115		 */
1116		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1117			if (first) {
1118				inode = find_next_inode(root, key.objectid);
1119				first = 0;
1120			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1121				btrfs_add_delayed_iput(BTRFS_I(inode));
1122				inode = find_next_inode(root, key.objectid);
1123			}
1124			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1125				struct extent_state *cached_state = NULL;
1126
1127				end = key.offset +
1128				      btrfs_file_extent_num_bytes(leaf, fi);
1129				WARN_ON(!IS_ALIGNED(key.offset,
1130						    fs_info->sectorsize));
1131				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1132				end--;
1133				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1134						      key.offset, end,
1135						      &cached_state);
1136				if (!ret)
1137					continue;
1138
1139				btrfs_drop_extent_map_range(BTRFS_I(inode),
1140							    key.offset, end, true);
1141				unlock_extent(&BTRFS_I(inode)->io_tree,
1142					      key.offset, end, &cached_state);
1143			}
1144		}
1145
1146		ret = get_new_location(rc->data_inode, &new_bytenr,
1147				       bytenr, num_bytes);
1148		if (ret) {
1149			/*
1150			 * Don't have to abort since we've not changed anything
1151			 * in the file extent yet.
1152			 */
1153			break;
1154		}
1155
1156		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1157		dirty = 1;
1158
1159		key.offset -= btrfs_file_extent_offset(leaf, fi);
1160		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1161				       num_bytes, parent);
1162		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1163				    key.objectid, key.offset,
1164				    root->root_key.objectid, false);
1165		ret = btrfs_inc_extent_ref(trans, &ref);
1166		if (ret) {
1167			btrfs_abort_transaction(trans, ret);
1168			break;
1169		}
1170
1171		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1172				       num_bytes, parent);
1173		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1174				    key.objectid, key.offset,
1175				    root->root_key.objectid, false);
1176		ret = btrfs_free_extent(trans, &ref);
1177		if (ret) {
1178			btrfs_abort_transaction(trans, ret);
1179			break;
1180		}
1181	}
1182	if (dirty)
1183		btrfs_mark_buffer_dirty(leaf);
1184	if (inode)
1185		btrfs_add_delayed_iput(BTRFS_I(inode));
1186	return ret;
1187}
1188
1189static noinline_for_stack
1190int memcmp_node_keys(struct extent_buffer *eb, int slot,
1191		     struct btrfs_path *path, int level)
1192{
1193	struct btrfs_disk_key key1;
1194	struct btrfs_disk_key key2;
1195	btrfs_node_key(eb, &key1, slot);
1196	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1197	return memcmp(&key1, &key2, sizeof(key1));
1198}
1199
1200/*
1201 * Try to replace tree blocks in the fs tree with the new blocks
1202 * in the reloc tree. Tree blocks that haven't been modified since the
1203 * reloc tree was created can be replaced.
1204 *
1205 * If a block was replaced, the level of the block + 1 is returned.
1206 * If no block got replaced, 0 is returned. If there are other
1207 * errors, a negative error number is returned.
1208 */
1209static noinline_for_stack
1210int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1211		 struct btrfs_root *dest, struct btrfs_root *src,
1212		 struct btrfs_path *path, struct btrfs_key *next_key,
1213		 int lowest_level, int max_level)
1214{
1215	struct btrfs_fs_info *fs_info = dest->fs_info;
1216	struct extent_buffer *eb;
1217	struct extent_buffer *parent;
1218	struct btrfs_ref ref = { 0 };
1219	struct btrfs_key key;
1220	u64 old_bytenr;
1221	u64 new_bytenr;
1222	u64 old_ptr_gen;
1223	u64 new_ptr_gen;
1224	u64 last_snapshot;
1225	u32 blocksize;
1226	int cow = 0;
1227	int level;
1228	int ret;
1229	int slot;
1230
1231	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1232	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1233
1234	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1235again:
1236	slot = path->slots[lowest_level];
1237	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1238
1239	eb = btrfs_lock_root_node(dest);
1240	level = btrfs_header_level(eb);
1241
1242	if (level < lowest_level) {
1243		btrfs_tree_unlock(eb);
1244		free_extent_buffer(eb);
1245		return 0;
1246	}
1247
1248	if (cow) {
1249		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1250				      BTRFS_NESTING_COW);
1251		if (ret) {
1252			btrfs_tree_unlock(eb);
1253			free_extent_buffer(eb);
1254			return ret;
1255		}
1256	}
1257
1258	if (next_key) {
1259		next_key->objectid = (u64)-1;
1260		next_key->type = (u8)-1;
1261		next_key->offset = (u64)-1;
1262	}
1263
1264	parent = eb;
1265	while (1) {
1266		level = btrfs_header_level(parent);
1267		ASSERT(level >= lowest_level);
1268
1269		ret = btrfs_bin_search(parent, &key, &slot);
1270		if (ret < 0)
1271			break;
1272		if (ret && slot > 0)
1273			slot--;
1274
1275		if (next_key && slot + 1 < btrfs_header_nritems(parent))
1276			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1277
1278		old_bytenr = btrfs_node_blockptr(parent, slot);
1279		blocksize = fs_info->nodesize;
1280		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1281
1282		if (level <= max_level) {
1283			eb = path->nodes[level];
1284			new_bytenr = btrfs_node_blockptr(eb,
1285							path->slots[level]);
1286			new_ptr_gen = btrfs_node_ptr_generation(eb,
1287							path->slots[level]);
1288		} else {
1289			new_bytenr = 0;
1290			new_ptr_gen = 0;
1291		}
1292
1293		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1294			ret = level;
1295			break;
1296		}
1297
1298		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1299		    memcmp_node_keys(parent, slot, path, level)) {
1300			if (level <= lowest_level) {
1301				ret = 0;
1302				break;
1303			}
1304
1305			eb = btrfs_read_node_slot(parent, slot);
1306			if (IS_ERR(eb)) {
1307				ret = PTR_ERR(eb);
1308				break;
1309			}
1310			btrfs_tree_lock(eb);
1311			if (cow) {
1312				ret = btrfs_cow_block(trans, dest, eb, parent,
1313						      slot, &eb,
1314						      BTRFS_NESTING_COW);
1315				if (ret) {
1316					btrfs_tree_unlock(eb);
1317					free_extent_buffer(eb);
1318					break;
1319				}
1320			}
1321
1322			btrfs_tree_unlock(parent);
1323			free_extent_buffer(parent);
1324
1325			parent = eb;
1326			continue;
1327		}
1328
1329		if (!cow) {
1330			btrfs_tree_unlock(parent);
1331			free_extent_buffer(parent);
1332			cow = 1;
1333			goto again;
1334		}
1335
1336		btrfs_node_key_to_cpu(path->nodes[level], &key,
1337				      path->slots[level]);
1338		btrfs_release_path(path);
1339
1340		path->lowest_level = level;
1341		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1342		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1343		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
1344		path->lowest_level = 0;
1345		if (ret) {
1346			if (ret > 0)
1347				ret = -ENOENT;
1348			break;
1349		}
1350
1351		/*
1352		 * Inform qgroup to trace both subtrees.
1353		 *
1354		 * We must trace both trees.
1355		 * 1) Tree reloc subtree
1356		 *    If not traced, we will leak data numbers
1357		 * 2) Fs subtree
1358		 *    If not traced, we will double count old data
1359		 *
1360		 * We don't scan the subtree right now, but only record
1361		 * the swapped tree blocks.
1362		 * The real subtree rescan is delayed until we have new
1363		 * CoW on the subtree root node before transaction commit.
1364		 */
1365		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1366				rc->block_group, parent, slot,
1367				path->nodes[level], path->slots[level],
1368				last_snapshot);
1369		if (ret < 0)
1370			break;
1371		/*
1372		 * swap blocks in fs tree and reloc tree.
1373		 */
1374		btrfs_set_node_blockptr(parent, slot, new_bytenr);
1375		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1376		btrfs_mark_buffer_dirty(parent);
1377
1378		btrfs_set_node_blockptr(path->nodes[level],
1379					path->slots[level], old_bytenr);
1380		btrfs_set_node_ptr_generation(path->nodes[level],
1381					      path->slots[level], old_ptr_gen);
1382		btrfs_mark_buffer_dirty(path->nodes[level]);
1383
1384		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1385				       blocksize, path->nodes[level]->start);
1386		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1387				    0, true);
1388		ret = btrfs_inc_extent_ref(trans, &ref);
1389		if (ret) {
1390			btrfs_abort_transaction(trans, ret);
1391			break;
1392		}
1393		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1394				       blocksize, 0);
1395		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
1396				    true);
1397		ret = btrfs_inc_extent_ref(trans, &ref);
1398		if (ret) {
1399			btrfs_abort_transaction(trans, ret);
1400			break;
1401		}
1402
1403		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1404				       blocksize, path->nodes[level]->start);
1405		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
1406				    0, true);
1407		ret = btrfs_free_extent(trans, &ref);
1408		if (ret) {
1409			btrfs_abort_transaction(trans, ret);
1410			break;
1411		}
1412
1413		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1414				       blocksize, 0);
1415		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
1416				    0, true);
1417		ret = btrfs_free_extent(trans, &ref);
1418		if (ret) {
1419			btrfs_abort_transaction(trans, ret);
1420			break;
1421		}
1422
1423		btrfs_unlock_up_safe(path, 0);
1424
1425		ret = level;
1426		break;
1427	}
1428	btrfs_tree_unlock(parent);
1429	free_extent_buffer(parent);
1430	return ret;
1431}
1432
1433/*
1434 * helper to find next relocated block in reloc tree
1435 */
1436static noinline_for_stack
1437int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1438		       int *level)
1439{
1440	struct extent_buffer *eb;
1441	int i;
1442	u64 last_snapshot;
1443	u32 nritems;
1444
1445	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1446
1447	for (i = 0; i < *level; i++) {
1448		free_extent_buffer(path->nodes[i]);
1449		path->nodes[i] = NULL;
1450	}
1451
1452	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1453		eb = path->nodes[i];
1454		nritems = btrfs_header_nritems(eb);
1455		while (path->slots[i] + 1 < nritems) {
1456			path->slots[i]++;
1457			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1458			    last_snapshot)
1459				continue;
1460
1461			*level = i;
1462			return 0;
1463		}
1464		free_extent_buffer(path->nodes[i]);
1465		path->nodes[i] = NULL;
1466	}
1467	return 1;
1468}
1469
1470/*
1471 * walk down reloc tree to find relocated block of lowest level
1472 */
1473static noinline_for_stack
1474int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1475			 int *level)
1476{
1477	struct extent_buffer *eb = NULL;
1478	int i;
1479	u64 ptr_gen = 0;
1480	u64 last_snapshot;
1481	u32 nritems;
1482
1483	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1484
1485	for (i = *level; i > 0; i--) {
1486		eb = path->nodes[i];
1487		nritems = btrfs_header_nritems(eb);
1488		while (path->slots[i] < nritems) {
1489			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1490			if (ptr_gen > last_snapshot)
1491				break;
1492			path->slots[i]++;
1493		}
1494		if (path->slots[i] >= nritems) {
1495			if (i == *level)
1496				break;
1497			*level = i + 1;
1498			return 0;
1499		}
1500		if (i == 1) {
1501			*level = i;
1502			return 0;
1503		}
1504
1505		eb = btrfs_read_node_slot(eb, path->slots[i]);
1506		if (IS_ERR(eb))
1507			return PTR_ERR(eb);
1508		BUG_ON(btrfs_header_level(eb) != i - 1);
1509		path->nodes[i - 1] = eb;
1510		path->slots[i - 1] = 0;
1511	}
1512	return 1;
1513}
1514
1515/*
1516 * invalidate extent cache for file extents whose key is in the range of
1517 * [min_key, max_key)
1518 */
1519static int invalidate_extent_cache(struct btrfs_root *root,
1520				   struct btrfs_key *min_key,
1521				   struct btrfs_key *max_key)
1522{
1523	struct btrfs_fs_info *fs_info = root->fs_info;
1524	struct inode *inode = NULL;
1525	u64 objectid;
1526	u64 start, end;
1527	u64 ino;
1528
1529	objectid = min_key->objectid;
1530	while (1) {
1531		struct extent_state *cached_state = NULL;
1532
1533		cond_resched();
1534		iput(inode);
1535
1536		if (objectid > max_key->objectid)
1537			break;
1538
1539		inode = find_next_inode(root, objectid);
1540		if (!inode)
1541			break;
1542		ino = btrfs_ino(BTRFS_I(inode));
1543
1544		if (ino > max_key->objectid) {
1545			iput(inode);
1546			break;
1547		}
1548
1549		objectid = ino + 1;
1550		if (!S_ISREG(inode->i_mode))
1551			continue;
1552
1553		if (unlikely(min_key->objectid == ino)) {
1554			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1555				continue;
1556			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1557				start = 0;
1558			else {
1559				start = min_key->offset;
1560				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1561			}
1562		} else {
1563			start = 0;
1564		}
1565
1566		if (unlikely(max_key->objectid == ino)) {
1567			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1568				continue;
1569			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1570				end = (u64)-1;
1571			} else {
1572				if (max_key->offset == 0)
1573					continue;
1574				end = max_key->offset;
1575				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1576				end--;
1577			}
1578		} else {
1579			end = (u64)-1;
1580		}
1581
1582		/* the lock_extent waits for read_folio to complete */
1583		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1584		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
1585		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
1586	}
1587	return 0;
1588}
1589
1590static int find_next_key(struct btrfs_path *path, int level,
1591			 struct btrfs_key *key)
1592
1593{
1594	while (level < BTRFS_MAX_LEVEL) {
1595		if (!path->nodes[level])
1596			break;
1597		if (path->slots[level] + 1 <
1598		    btrfs_header_nritems(path->nodes[level])) {
1599			btrfs_node_key_to_cpu(path->nodes[level], key,
1600					      path->slots[level] + 1);
1601			return 0;
1602		}
1603		level++;
1604	}
1605	return 1;
1606}
1607
1608/*
1609 * Insert current subvolume into reloc_control::dirty_subvol_roots
1610 */
1611static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1612			       struct reloc_control *rc,
1613			       struct btrfs_root *root)
1614{
1615	struct btrfs_root *reloc_root = root->reloc_root;
1616	struct btrfs_root_item *reloc_root_item;
1617	int ret;
1618
1619	/* @root must be a subvolume tree root with a valid reloc tree */
1620	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1621	ASSERT(reloc_root);
1622
1623	reloc_root_item = &reloc_root->root_item;
1624	memset(&reloc_root_item->drop_progress, 0,
1625		sizeof(reloc_root_item->drop_progress));
1626	btrfs_set_root_drop_level(reloc_root_item, 0);
1627	btrfs_set_root_refs(reloc_root_item, 0);
1628	ret = btrfs_update_reloc_root(trans, root);
1629	if (ret)
1630		return ret;
1631
1632	if (list_empty(&root->reloc_dirty_list)) {
1633		btrfs_grab_root(root);
1634		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1635	}
1636
1637	return 0;
1638}
1639
1640static int clean_dirty_subvols(struct reloc_control *rc)
1641{
1642	struct btrfs_root *root;
1643	struct btrfs_root *next;
1644	int ret = 0;
1645	int ret2;
1646
1647	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1648				 reloc_dirty_list) {
1649		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1650			/* Merged subvolume, cleanup its reloc root */
1651			struct btrfs_root *reloc_root = root->reloc_root;
1652
1653			list_del_init(&root->reloc_dirty_list);
1654			root->reloc_root = NULL;
1655			/*
1656			 * Need barrier to ensure clear_bit() only happens after
1657			 * root->reloc_root = NULL. Pairs with have_reloc_root.
1658			 */
1659			smp_wmb();
1660			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1661			if (reloc_root) {
1662				/*
1663				 * btrfs_drop_snapshot drops our ref we hold for
1664				 * ->reloc_root.  If it fails however we must
1665				 * drop the ref ourselves.
1666				 */
1667				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1668				if (ret2 < 0) {
1669					btrfs_put_root(reloc_root);
1670					if (!ret)
1671						ret = ret2;
1672				}
1673			}
1674			btrfs_put_root(root);
1675		} else {
1676			/* Orphan reloc tree, just clean it up */
1677			ret2 = btrfs_drop_snapshot(root, 0, 1);
1678			if (ret2 < 0) {
1679				btrfs_put_root(root);
1680				if (!ret)
1681					ret = ret2;
1682			}
1683		}
1684	}
1685	return ret;
1686}
1687
1688/*
1689 * merge the relocated tree blocks in reloc tree with corresponding
1690 * fs tree.
1691 */
1692static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1693					       struct btrfs_root *root)
1694{
1695	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1696	struct btrfs_key key;
1697	struct btrfs_key next_key;
1698	struct btrfs_trans_handle *trans = NULL;
1699	struct btrfs_root *reloc_root;
1700	struct btrfs_root_item *root_item;
1701	struct btrfs_path *path;
1702	struct extent_buffer *leaf;
1703	int reserve_level;
1704	int level;
1705	int max_level;
1706	int replaced = 0;
1707	int ret = 0;
1708	u32 min_reserved;
1709
1710	path = btrfs_alloc_path();
1711	if (!path)
1712		return -ENOMEM;
1713	path->reada = READA_FORWARD;
1714
1715	reloc_root = root->reloc_root;
1716	root_item = &reloc_root->root_item;
1717
1718	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1719		level = btrfs_root_level(root_item);
1720		atomic_inc(&reloc_root->node->refs);
1721		path->nodes[level] = reloc_root->node;
1722		path->slots[level] = 0;
1723	} else {
1724		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1725
1726		level = btrfs_root_drop_level(root_item);
1727		BUG_ON(level == 0);
1728		path->lowest_level = level;
1729		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1730		path->lowest_level = 0;
1731		if (ret < 0) {
1732			btrfs_free_path(path);
1733			return ret;
1734		}
1735
1736		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1737				      path->slots[level]);
1738		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1739
1740		btrfs_unlock_up_safe(path, 0);
1741	}
1742
1743	/*
1744	 * In merge_reloc_root(), we modify the upper level pointer to swap the
1745	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
1746	 * block COW, we COW at most from level 1 to root level for each tree.
1747	 *
1748	 * Thus the needed metadata size is at most root_level * nodesize,
1749	 * and * 2 since we have two trees to COW.
1750	 */
1751	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1752	min_reserved = fs_info->nodesize * reserve_level * 2;
1753	memset(&next_key, 0, sizeof(next_key));
1754
1755	while (1) {
1756		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
1757					     min_reserved,
1758					     BTRFS_RESERVE_FLUSH_LIMIT);
1759		if (ret)
1760			goto out;
1761		trans = btrfs_start_transaction(root, 0);
1762		if (IS_ERR(trans)) {
1763			ret = PTR_ERR(trans);
1764			trans = NULL;
1765			goto out;
1766		}
1767
1768		/*
1769		 * At this point we no longer have a reloc_control, so we can't
1770		 * depend on btrfs_init_reloc_root to update our last_trans.
1771		 *
1772		 * But that's ok, we started the trans handle on our
1773		 * corresponding fs_root, which means it's been added to the
1774		 * dirty list.  At commit time we'll still call
1775		 * btrfs_update_reloc_root() and update our root item
1776		 * appropriately.
1777		 */
1778		reloc_root->last_trans = trans->transid;
1779		trans->block_rsv = rc->block_rsv;
1780
1781		replaced = 0;
1782		max_level = level;
1783
1784		ret = walk_down_reloc_tree(reloc_root, path, &level);
1785		if (ret < 0)
1786			goto out;
1787		if (ret > 0)
1788			break;
1789
1790		if (!find_next_key(path, level, &key) &&
1791		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1792			ret = 0;
1793		} else {
1794			ret = replace_path(trans, rc, root, reloc_root, path,
1795					   &next_key, level, max_level);
1796		}
1797		if (ret < 0)
1798			goto out;
1799		if (ret > 0) {
1800			level = ret;
1801			btrfs_node_key_to_cpu(path->nodes[level], &key,
1802					      path->slots[level]);
1803			replaced = 1;
1804		}
1805
1806		ret = walk_up_reloc_tree(reloc_root, path, &level);
1807		if (ret > 0)
1808			break;
1809
1810		BUG_ON(level == 0);
1811		/*
1812		 * Save the merging progress in drop_progress.
1813		 * This is OK since root refs == 1 in this case.
1814		 */
1815		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1816			       path->slots[level]);
1817		btrfs_set_root_drop_level(root_item, level);
1818
1819		btrfs_end_transaction_throttle(trans);
1820		trans = NULL;
1821
1822		btrfs_btree_balance_dirty(fs_info);
1823
1824		if (replaced && rc->stage == UPDATE_DATA_PTRS)
1825			invalidate_extent_cache(root, &key, &next_key);
1826	}
1827
1828	/*
1829	 * Handle the case where only one block in the fs tree needs to be
1830	 * relocated and the block is the tree root.
1831	 */
1832	leaf = btrfs_lock_root_node(root);
1833	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1834			      BTRFS_NESTING_COW);
1835	btrfs_tree_unlock(leaf);
1836	free_extent_buffer(leaf);
1837out:
1838	btrfs_free_path(path);
1839
1840	if (ret == 0) {
1841		ret = insert_dirty_subvol(trans, rc, root);
1842		if (ret)
1843			btrfs_abort_transaction(trans, ret);
1844	}
1845
1846	if (trans)
1847		btrfs_end_transaction_throttle(trans);
1848
1849	btrfs_btree_balance_dirty(fs_info);
1850
1851	if (replaced && rc->stage == UPDATE_DATA_PTRS)
1852		invalidate_extent_cache(root, &key, &next_key);
1853
1854	return ret;
1855}
1856
1857static noinline_for_stack
1858int prepare_to_merge(struct reloc_control *rc, int err)
1859{
1860	struct btrfs_root *root = rc->extent_root;
1861	struct btrfs_fs_info *fs_info = root->fs_info;
1862	struct btrfs_root *reloc_root;
1863	struct btrfs_trans_handle *trans;
1864	LIST_HEAD(reloc_roots);
1865	u64 num_bytes = 0;
1866	int ret;
1867
1868	mutex_lock(&fs_info->reloc_mutex);
1869	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1870	rc->merging_rsv_size += rc->nodes_relocated * 2;
1871	mutex_unlock(&fs_info->reloc_mutex);
1872
1873again:
1874	if (!err) {
1875		num_bytes = rc->merging_rsv_size;
1876		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
1877					  BTRFS_RESERVE_FLUSH_ALL);
1878		if (ret)
1879			err = ret;
1880	}
1881
1882	trans = btrfs_join_transaction(rc->extent_root);
1883	if (IS_ERR(trans)) {
1884		if (!err)
1885			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1886						num_bytes, NULL);
1887		return PTR_ERR(trans);
1888	}
1889
1890	if (!err) {
1891		if (num_bytes != rc->merging_rsv_size) {
1892			btrfs_end_transaction(trans);
1893			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1894						num_bytes, NULL);
1895			goto again;
1896		}
1897	}
1898
1899	rc->merge_reloc_tree = 1;
1900
1901	while (!list_empty(&rc->reloc_roots)) {
1902		reloc_root = list_entry(rc->reloc_roots.next,
1903					struct btrfs_root, root_list);
1904		list_del_init(&reloc_root->root_list);
1905
1906		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1907				false);
1908		if (IS_ERR(root)) {
1909			/*
1910			 * Even if we have an error we need this reloc root
1911			 * back on our list so we can clean up properly.
1912			 */
1913			list_add(&reloc_root->root_list, &reloc_roots);
1914			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1915			if (!err)
1916				err = PTR_ERR(root);
1917			break;
1918		}
1919		ASSERT(root->reloc_root == reloc_root);
1920
1921		/*
1922		 * set reference count to 1, so btrfs_recover_relocation
1923		 * knows it should resume merging
1924		 */
1925		if (!err)
1926			btrfs_set_root_refs(&reloc_root->root_item, 1);
1927		ret = btrfs_update_reloc_root(trans, root);
1928
1929		/*
1930		 * Even if we have an error we need this reloc root back on our
1931		 * list so we can clean up properly.
1932		 */
1933		list_add(&reloc_root->root_list, &reloc_roots);
1934		btrfs_put_root(root);
1935
1936		if (ret) {
1937			btrfs_abort_transaction(trans, ret);
1938			if (!err)
1939				err = ret;
1940			break;
1941		}
1942	}
1943
1944	list_splice(&reloc_roots, &rc->reloc_roots);
1945
1946	if (!err)
1947		err = btrfs_commit_transaction(trans);
1948	else
1949		btrfs_end_transaction(trans);
1950	return err;
1951}
1952
1953static noinline_for_stack
1954void free_reloc_roots(struct list_head *list)
1955{
1956	struct btrfs_root *reloc_root, *tmp;
1957
1958	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1959		__del_reloc_root(reloc_root);
1960}
1961
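/*
 * Merge all reloc trees back into their fs trees.
 *
 * Reloc roots with a positive ref count are merged via merge_reloc_root();
 * roots with a zero ref count are only detached from their fs root and
 * queued on rc->dirty_subvol_roots for later cleanup.
 */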
1962static noinline_for_stack
1963void merge_reloc_roots(struct reloc_control *rc)
1964{
1965	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1966	struct btrfs_root *root;
1967	struct btrfs_root *reloc_root;
1968	LIST_HEAD(reloc_roots);
1969	int found = 0;
1970	int ret = 0;
1971again:
1972	root = rc->extent_root;
1973
1974	/*
1975	 * This serializes us with btrfs_record_root_in_transaction();
1976	 * we have to make sure nobody is in the middle of adding
1977	 * their roots to the list while we are doing
1978	 * this splice.
1979	 */
1980	mutex_lock(&fs_info->reloc_mutex);
1981	list_splice_init(&rc->reloc_roots, &reloc_roots);
1982	mutex_unlock(&fs_info->reloc_mutex);
1983
1984	while (!list_empty(&reloc_roots)) {
1985		found = 1;
1986		reloc_root = list_entry(reloc_roots.next,
1987					struct btrfs_root, root_list);
1988
1989		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1990					 false);
1991		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1992			if (IS_ERR(root)) {
1993				/*
1994				 * For recovery we read the fs roots on mount,
1995				 * and if we didn't find the root then we marked
1996				 * the reloc root as a garbage root.  For normal
1997				 * relocation obviously the root should exist in
1998				 * memory.  However there's no reason we can't
1999				 * handle the error properly here just in case.
2000				 */
2001				ASSERT(0);
2002				ret = PTR_ERR(root);
2003				goto out;
2004			}
2005			if (root->reloc_root != reloc_root) {
2006				/*
2007				 * This is actually impossible without something
2008				 * going really wrong (like weird race condition
2009				 * or cosmic rays).
2010				 */
2011				ASSERT(0);
2012				ret = -EINVAL;
2013				goto out;
2014			}
2015			ret = merge_reloc_root(rc, root);
2016			btrfs_put_root(root);
2017			if (ret) {
2018				if (list_empty(&reloc_root->root_list))
2019					list_add_tail(&reloc_root->root_list,
2020						      &reloc_roots);
2021				goto out;
2022			}
2023		} else {
2024			if (!IS_ERR(root)) {
2025				if (root->reloc_root == reloc_root) {
2026					root->reloc_root = NULL;
2027					btrfs_put_root(reloc_root);
2028				}
2029				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2030					  &root->state);
2031				btrfs_put_root(root);
2032			}
2033
2034			list_del_init(&reloc_root->root_list);
2035			/* Don't forget to queue this reloc root for cleanup */
2036			list_add_tail(&reloc_root->reloc_dirty_list,
2037				      &rc->dirty_subvol_roots);
2038		}
2039	}
2040
2041	if (found) {
2042		found = 0;
2043		goto again;
2044	}
2045out:
2046	if (ret) {
2047		btrfs_handle_fs_error(fs_info, ret, NULL);
2048		free_reloc_roots(&reloc_roots);
2049
2050		/* new reloc root may be added */
2051		mutex_lock(&fs_info->reloc_mutex);
2052		list_splice_init(&rc->reloc_roots, &reloc_roots);
2053		mutex_unlock(&fs_info->reloc_mutex);
2054		free_reloc_roots(&reloc_roots);
2055	}
2056
2057	/*
2058	 * We used to have
2059	 *
2060	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2061	 *
2062	 * here, but it's wrong.  If we fail to start the transaction in
2063	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2064	 * have actually been removed from the reloc_root_tree rb tree.  This is
2065	 * fine because we're bailing here, and we hold a reference on the root
2066	 * for the list that holds it, so these roots will be cleaned up when we
2067	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
2068	 * will be cleaned up on unmount.
2069	 *
2070	 * The remaining nodes will be cleaned up by free_reloc_control.
2071	 */
2072}
2073
2074static void free_block_list(struct rb_root *blocks)
2075{
2076	struct tree_block *block;
2077	struct rb_node *rb_node;
2078	while ((rb_node = rb_first(blocks))) {
2079		block = rb_entry(rb_node, struct tree_block, rb_node);
2080		rb_erase(rb_node, blocks);
2081		kfree(block);
2082	}
2083}
2084
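/*
 * Record the fs root that @reloc_root belongs to in the current transaction.
 * Nothing to do if the reloc root has already been recorded in this
 * transaction.
 */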
2085static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2086				      struct btrfs_root *reloc_root)
2087{
2088	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2089	struct btrfs_root *root;
2090	int ret;
2091
2092	if (reloc_root->last_trans == trans->transid)
2093		return 0;
2094
2095	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2096
2097	/*
2098	 * This should succeed, since we can't have a reloc root without having
2099	 * already looked up the actual root and created the reloc root for this
2100	 * root.
2101	 *
2102	 * However if there's some sort of corruption where we have a ref to a
2103	 * reloc root without a corresponding root this could return ENOENT.
2104	 */
2105	if (IS_ERR(root)) {
2106		ASSERT(0);
2107		return PTR_ERR(root);
2108	}
2109	if (root->reloc_root != reloc_root) {
2110		ASSERT(0);
2111		btrfs_err(fs_info,
2112			  "root %llu has two reloc roots associated with it",
2113			  reloc_root->root_key.offset);
2114		btrfs_put_root(root);
2115		return -EUCLEAN;
2116	}
2117	ret = btrfs_record_root_in_trans(trans, root);
2118	btrfs_put_root(root);
2119
2120	return ret;
2121}
2122
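/*
 * Walk up the backref tree from @node to find the tree that owns it, record
 * that tree in the transaction, and return its reloc root (or the root
 * itself if it is already a reloc root).  Also set up
 * rc->backref_cache.path[] for btrfs_reloc_cow_block().
 */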
2123static noinline_for_stack
2124struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2125				     struct reloc_control *rc,
2126				     struct btrfs_backref_node *node,
2127				     struct btrfs_backref_edge *edges[])
2128{
2129	struct btrfs_backref_node *next;
2130	struct btrfs_root *root;
2131	int index = 0;
2132	int ret;
2133
2134	next = node;
2135	while (1) {
2136		cond_resched();
2137		next = walk_up_backref(next, edges, &index);
2138		root = next->root;
2139
2140		/*
2141		 * If there is no root, then our references for this block are
2142		 * incomplete, as we should be able to walk all the way up to a
2143		 * block that is owned by a root.
2144		 *
2145		 * This path is only for SHAREABLE roots, so if we come upon a
2146		 * non-SHAREABLE root then we have backrefs that resolve
2147		 * improperly.
2148		 *
2149		 * Both of these cases indicate file system corruption, or a bug
2150		 * in the backref walking code.
2151		 */
2152		if (!root) {
2153			ASSERT(0);
2154			btrfs_err(trans->fs_info,
2155		"bytenr %llu doesn't have a backref path ending in a root",
2156				  node->bytenr);
2157			return ERR_PTR(-EUCLEAN);
2158		}
2159		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2160			ASSERT(0);
2161			btrfs_err(trans->fs_info,
2162	"bytenr %llu has multiple refs with one ending in a non-shareable root",
2163				  node->bytenr);
2164			return ERR_PTR(-EUCLEAN);
2165		}
2166
2167		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2168			ret = record_reloc_root_in_trans(trans, root);
2169			if (ret)
2170				return ERR_PTR(ret);
2171			break;
2172		}
2173
2174		ret = btrfs_record_root_in_trans(trans, root);
2175		if (ret)
2176			return ERR_PTR(ret);
2177		root = root->reloc_root;
2178
2179		/*
2180		 * We could have raced with another thread which failed, so
2181		 * root->reloc_root may not be set, return ENOENT in this case.
2182		 */
2183		if (!root)
2184			return ERR_PTR(-ENOENT);
2185
2186		if (next->new_bytenr != root->node->start) {
2187			/*
2188			 * We just created the reloc root, so we shouldn't have
2189			 * ->new_bytenr set and this shouldn't be in the changed
2190			 * list.  If it is then we have multiple roots pointing
2191			 * at the same bytenr which indicates corruption, or
2192			 * we've made a mistake in the backref walking code.
2193			 */
2194			ASSERT(next->new_bytenr == 0);
2195			ASSERT(list_empty(&next->list));
2196			if (next->new_bytenr || !list_empty(&next->list)) {
2197				btrfs_err(trans->fs_info,
2198	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2199					  node->bytenr, next->bytenr);
2200				return ERR_PTR(-EUCLEAN);
2201			}
2202
2203			next->new_bytenr = root->node->start;
2204			btrfs_put_root(next->root);
2205			next->root = btrfs_grab_root(root);
2206			ASSERT(next->root);
2207			list_add_tail(&next->list,
2208				      &rc->backref_cache.changed);
2209			mark_block_processed(rc, next);
2210			break;
2211		}
2212
2213		WARN_ON(1);
2214		root = NULL;
2215		next = walk_down_backref(edges, &index);
2216		if (!next || next->level <= node->level)
2217			break;
2218	}
2219	if (!root) {
2220		/*
2221		 * This can happen if there's fs corruption or if there's a bug
2222		 * in the backref lookup code.
2223		 */
2224		ASSERT(0);
2225		return ERR_PTR(-ENOENT);
2226	}
2227
2228	next = node;
2229	/* setup backref node path for btrfs_reloc_cow_block */
2230	while (1) {
2231		rc->backref_cache.path[next->level] = next;
2232		if (--index < 0)
2233			break;
2234		next = edges[index]->node[UPPER];
2235	}
2236	return root;
2237}
2238
2239/*
2240 * Select a tree root for relocation.
2241 *
2242 * Return NULL if the block is not shareable. We should use do_relocation() in
2243 * this case.
2244 *
2245 * Return a tree root pointer if the block is shareable.
2246 * Return -ENOENT if the block is the root of a reloc tree.
2247 */
2248static noinline_for_stack
2249struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2250{
2251	struct btrfs_backref_node *next;
2252	struct btrfs_root *root;
2253	struct btrfs_root *fs_root = NULL;
2254	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2255	int index = 0;
2256
2257	next = node;
2258	while (1) {
2259		cond_resched();
2260		next = walk_up_backref(next, edges, &index);
2261		root = next->root;
2262
2263		/*
2264		 * This can occur if we have incomplete extent refs leading all
2265		 * the way up a particular path, in this case return -EUCLEAN.
2266		 */
2267		if (!root)
2268			return ERR_PTR(-EUCLEAN);
2269
2270		/* No other choice for non-shareable tree */
2271		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2272			return root;
2273
2274		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2275			fs_root = root;
2276
2277		if (next != node)
2278			return NULL;
2279
2280		next = walk_down_backref(edges, &index);
2281		if (!next || next->level <= node->level)
2282			break;
2283	}
2284
2285	if (!fs_root)
2286		return ERR_PTR(-ENOENT);
2287	return fs_root;
2288}
2289
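/*
 * Calculate how much metadata space relocating @node needs: one nodesize for
 * each not yet processed block on the paths from @node up to the tree roots.
 */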
2290static noinline_for_stack
2291u64 calcu_metadata_size(struct reloc_control *rc,
2292			struct btrfs_backref_node *node, int reserve)
2293{
2294	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2295	struct btrfs_backref_node *next = node;
2296	struct btrfs_backref_edge *edge;
2297	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2298	u64 num_bytes = 0;
2299	int index = 0;
2300
2301	BUG_ON(reserve && node->processed);
2302
2303	while (next) {
2304		cond_resched();
2305		while (1) {
2306			if (next->processed && (reserve || next != node))
2307				break;
2308
2309			num_bytes += fs_info->nodesize;
2310
2311			if (list_empty(&next->upper))
2312				break;
2313
2314			edge = list_entry(next->upper.next,
2315					struct btrfs_backref_edge, list[LOWER]);
2316			edges[index++] = edge;
2317			next = edge->node[UPPER];
2318		}
2319		next = walk_down_backref(edges, &index);
2320	}
2321	return num_bytes;
2322}
2323
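/*
 * Reserve metadata space for relocating @node and the blocks above it.
 * Only limited flushing is allowed under a transaction, so on ENOSPC the
 * reservation size is grown and -EAGAIN is returned, telling the caller to
 * drop the transaction and retry.
 */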
2324static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2325				  struct reloc_control *rc,
2326				  struct btrfs_backref_node *node)
2327{
2328	struct btrfs_root *root = rc->extent_root;
2329	struct btrfs_fs_info *fs_info = root->fs_info;
2330	u64 num_bytes;
2331	int ret;
2332	u64 tmp;
2333
2334	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2335
2336	trans->block_rsv = rc->block_rsv;
2337	rc->reserved_bytes += num_bytes;
2338
2339	/*
2340	 * We are under a transaction here so we can only do limited flushing.
2341	 * If we get an enospc just kick back -EAGAIN so we know to drop the
2342	 * transaction and try to refill when we can flush all the things.
2343	 */
2344	ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2345				     BTRFS_RESERVE_FLUSH_LIMIT);
2346	if (ret) {
2347		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2348		while (tmp <= rc->reserved_bytes)
2349			tmp <<= 1;
2350		/*
2351		 * Only one thread can access block_rsv at this point,
2352		 * so we don't need to hold a lock to protect it.
2353		 * Expand the reservation size here to allow enough
2354		 * space for relocation; we will return earlier in
2355		 * the enospc case.
2356		 */
2357		rc->block_rsv->size = tmp + fs_info->nodesize *
2358				      RELOCATION_RESERVED_NODES;
2359		return -EAGAIN;
2360	}
2361
2362	return 0;
2363}
2364
2365/*
2366 * Relocate a tree block, and then update pointers in upper level
2367 * blocks that reference the block to point to the new location.
2368 *
2369 * If called by link_to_upper, the block has already been relocated.
2370 * In that case this function just updates pointers.
2371 */
2372static int do_relocation(struct btrfs_trans_handle *trans,
2373			 struct reloc_control *rc,
2374			 struct btrfs_backref_node *node,
2375			 struct btrfs_key *key,
2376			 struct btrfs_path *path, int lowest)
2377{
2378	struct btrfs_backref_node *upper;
2379	struct btrfs_backref_edge *edge;
2380	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2381	struct btrfs_root *root;
2382	struct extent_buffer *eb;
2383	u32 blocksize;
2384	u64 bytenr;
2385	int slot;
2386	int ret = 0;
2387
2388	/*
2389	 * If we are lowest then this is the first time we're processing this
2390	 * block, and thus shouldn't have an eb associated with it yet.
2391	 */
2392	ASSERT(!lowest || !node->eb);
2393
2394	path->lowest_level = node->level + 1;
2395	rc->backref_cache.path[node->level] = node;
2396	list_for_each_entry(edge, &node->upper, list[LOWER]) {
2397		struct btrfs_ref ref = { 0 };
2398
2399		cond_resched();
2400
2401		upper = edge->node[UPPER];
2402		root = select_reloc_root(trans, rc, upper, edges);
2403		if (IS_ERR(root)) {
2404			ret = PTR_ERR(root);
2405			goto next;
2406		}
2407
2408		if (upper->eb && !upper->locked) {
2409			if (!lowest) {
2410				ret = btrfs_bin_search(upper->eb, key, &slot);
2411				if (ret < 0)
2412					goto next;
2413				BUG_ON(ret);
2414				bytenr = btrfs_node_blockptr(upper->eb, slot);
2415				if (node->eb->start == bytenr)
2416					goto next;
2417			}
2418			btrfs_backref_drop_node_buffer(upper);
2419		}
2420
2421		if (!upper->eb) {
2422			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2423			if (ret) {
2424				if (ret > 0)
2425					ret = -ENOENT;
2426
2427				btrfs_release_path(path);
2428				break;
2429			}
2430
2431			if (!upper->eb) {
2432				upper->eb = path->nodes[upper->level];
2433				path->nodes[upper->level] = NULL;
2434			} else {
2435				BUG_ON(upper->eb != path->nodes[upper->level]);
2436			}
2437
2438			upper->locked = 1;
2439			path->locks[upper->level] = 0;
2440
2441			slot = path->slots[upper->level];
2442			btrfs_release_path(path);
2443		} else {
2444			ret = btrfs_bin_search(upper->eb, key, &slot);
2445			if (ret < 0)
2446				goto next;
2447			BUG_ON(ret);
2448		}
2449
2450		bytenr = btrfs_node_blockptr(upper->eb, slot);
2451		if (lowest) {
2452			if (bytenr != node->bytenr) {
2453				btrfs_err(root->fs_info,
2454		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2455					  bytenr, node->bytenr, slot,
2456					  upper->eb->start);
2457				ret = -EIO;
2458				goto next;
2459			}
2460		} else {
2461			if (node->eb->start == bytenr)
2462				goto next;
2463		}
2464
2465		blocksize = root->fs_info->nodesize;
2466		eb = btrfs_read_node_slot(upper->eb, slot);
2467		if (IS_ERR(eb)) {
2468			ret = PTR_ERR(eb);
2469			goto next;
2470		}
2471		btrfs_tree_lock(eb);
2472
2473		if (!node->eb) {
2474			ret = btrfs_cow_block(trans, root, eb, upper->eb,
2475					      slot, &eb, BTRFS_NESTING_COW);
2476			btrfs_tree_unlock(eb);
2477			free_extent_buffer(eb);
2478			if (ret < 0)
2479				goto next;
2480			/*
2481			 * We've just COWed this block, it should have updated
2482			 * the correct backref node entry.
2483			 */
2484			ASSERT(node->eb == eb);
2485		} else {
2486			btrfs_set_node_blockptr(upper->eb, slot,
2487						node->eb->start);
2488			btrfs_set_node_ptr_generation(upper->eb, slot,
2489						      trans->transid);
2490			btrfs_mark_buffer_dirty(upper->eb);
2491
2492			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2493					       node->eb->start, blocksize,
2494					       upper->eb->start);
2495			btrfs_init_tree_ref(&ref, node->level,
2496					    btrfs_header_owner(upper->eb),
2497					    root->root_key.objectid, false);
2498			ret = btrfs_inc_extent_ref(trans, &ref);
2499			if (!ret)
2500				ret = btrfs_drop_subtree(trans, root, eb,
2501							 upper->eb);
2502			if (ret)
2503				btrfs_abort_transaction(trans, ret);
2504		}
2505next:
2506		if (!upper->pending)
2507			btrfs_backref_drop_node_buffer(upper);
2508		else
2509			btrfs_backref_unlock_node_buffer(upper);
2510		if (ret)
2511			break;
2512	}
2513
2514	if (!ret && node->pending) {
2515		btrfs_backref_drop_node_buffer(node);
2516		list_move_tail(&node->list, &rc->backref_cache.changed);
2517		node->pending = 0;
2518	}
2519
2520	path->lowest_level = 0;
2521
2522	/*
2523	 * We should have allocated all of our space in the block rsv and thus
2524	 * shouldn't hit ENOSPC here.
2525	 */
2526	ASSERT(ret != -ENOSPC);
2527	return ret;
2528}
2529
2530static int link_to_upper(struct btrfs_trans_handle *trans,
2531			 struct reloc_control *rc,
2532			 struct btrfs_backref_node *node,
2533			 struct btrfs_path *path)
2534{
2535	struct btrfs_key key;
2536
2537	btrfs_node_key_to_cpu(node->eb, &key, 0);
2538	return do_relocation(trans, rc, node, &key, path, 0);
2539}
2540
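/*
 * Process the backref nodes still sitting on the per-level pending lists:
 * link each of them to its upper level blocks via link_to_upper(), keeping
 * the first error encountered.
 */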
2541static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2542				struct reloc_control *rc,
2543				struct btrfs_path *path, int err)
2544{
2545	LIST_HEAD(list);
2546	struct btrfs_backref_cache *cache = &rc->backref_cache;
2547	struct btrfs_backref_node *node;
2548	int level;
2549	int ret;
2550
2551	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2552		while (!list_empty(&cache->pending[level])) {
2553			node = list_entry(cache->pending[level].next,
2554					  struct btrfs_backref_node, list);
2555			list_move_tail(&node->list, &list);
2556			BUG_ON(!node->pending);
2557
2558			if (!err) {
2559				ret = link_to_upper(trans, rc, node, path);
2560				if (ret < 0)
2561					err = ret;
2562			}
2563		}
2564		list_splice_init(&list, &cache->pending[level]);
2565	}
2566	return err;
2567}
2568
2569/*
2570 * Mark a block and all blocks that directly/indirectly reference
2571 * the block as processed.
2572 */
2573static void update_processed_blocks(struct reloc_control *rc,
2574				    struct btrfs_backref_node *node)
2575{
2576	struct btrfs_backref_node *next = node;
2577	struct btrfs_backref_edge *edge;
2578	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2579	int index = 0;
2580
2581	while (next) {
2582		cond_resched();
2583		while (1) {
2584			if (next->processed)
2585				break;
2586
2587			mark_block_processed(rc, next);
2588
2589			if (list_empty(&next->upper))
2590				break;
2591
2592			edge = list_entry(next->upper.next,
2593					struct btrfs_backref_edge, list[LOWER]);
2594			edges[index++] = edge;
2595			next = edge->node[UPPER];
2596		}
2597		next = walk_down_backref(edges, &index);
2598	}
2599}
2600
2601static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2602{
2603	u32 blocksize = rc->extent_root->fs_info->nodesize;
2604
2605	if (test_range_bit(&rc->processed_blocks, bytenr,
2606			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2607		return 1;
2608	return 0;
2609}
2610
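/*
 * Read the tree block from disk to fetch its first key and mark the block
 * as key_ready.
 */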
2611static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2612			      struct tree_block *block)
2613{
2614	struct btrfs_tree_parent_check check = {
2615		.level = block->level,
2616		.owner_root = block->owner,
2617		.transid = block->key.offset
2618	};
2619	struct extent_buffer *eb;
2620
2621	eb = read_tree_block(fs_info, block->bytenr, &check);
2622	if (IS_ERR(eb))
2623		return PTR_ERR(eb);
2624	if (!extent_buffer_uptodate(eb)) {
2625		free_extent_buffer(eb);
2626		return -EIO;
2627	}
2628	if (block->level == 0)
2629		btrfs_item_key_to_cpu(eb, &block->key, 0);
2630	else
2631		btrfs_node_key_to_cpu(eb, &block->key, 0);
2632	free_extent_buffer(eb);
2633	block->key_ready = 1;
2634	return 0;
2635}
2636
2637/*
2638 * helper function to relocate a tree block
2639 */
2640static int relocate_tree_block(struct btrfs_trans_handle *trans,
2641				struct reloc_control *rc,
2642				struct btrfs_backref_node *node,
2643				struct btrfs_key *key,
2644				struct btrfs_path *path)
2645{
2646	struct btrfs_root *root;
2647	int ret = 0;
2648
2649	if (!node)
2650		return 0;
2651
2652	/*
2653	 * If we fail here we want to drop our backref_node because we are going
2654	 * to start over and regenerate the tree for it.
2655	 */
2656	ret = reserve_metadata_space(trans, rc, node);
2657	if (ret)
2658		goto out;
2659
2660	BUG_ON(node->processed);
2661	root = select_one_root(node);
2662	if (IS_ERR(root)) {
2663		ret = PTR_ERR(root);
2664
2665		/* See explanation in select_one_root for the -EUCLEAN case. */
2666		ASSERT(ret == -ENOENT);
2667		if (ret == -ENOENT) {
2668			ret = 0;
2669			update_processed_blocks(rc, node);
2670		}
2671		goto out;
2672	}
2673
2674	if (root) {
2675		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2676			/*
2677			 * This block was the root block of a root, and this is
2678			 * the first time we're processing the block and thus it
2679			 * should not have had the ->new_bytenr modified and
2680			 * should have not been included on the changed list.
2681			 *
2682			 * However in the case of corruption we could have
2683			 * multiple refs pointing to the same block improperly,
2684			 * and thus we would trip over these checks.  ASSERT()
2685			 * for the developer case, because it could indicate a
2686			 * bug in the backref code, however error out for a
2687			 * normal user in the case of corruption.
2688			 */
2689			ASSERT(node->new_bytenr == 0);
2690			ASSERT(list_empty(&node->list));
2691			if (node->new_bytenr || !list_empty(&node->list)) {
2692				btrfs_err(root->fs_info,
2693				  "bytenr %llu has improper references to it",
2694					  node->bytenr);
2695				ret = -EUCLEAN;
2696				goto out;
2697			}
2698			ret = btrfs_record_root_in_trans(trans, root);
2699			if (ret)
2700				goto out;
2701			/*
2702			 * Another thread could have failed, need to check if we
2703			 * have reloc_root actually set.
2704			 */
2705			if (!root->reloc_root) {
2706				ret = -ENOENT;
2707				goto out;
2708			}
2709			root = root->reloc_root;
2710			node->new_bytenr = root->node->start;
2711			btrfs_put_root(node->root);
2712			node->root = btrfs_grab_root(root);
2713			ASSERT(node->root);
2714			list_add_tail(&node->list, &rc->backref_cache.changed);
2715		} else {
2716			path->lowest_level = node->level;
2717			if (root == root->fs_info->chunk_root)
2718				btrfs_reserve_chunk_metadata(trans, false);
2719			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2720			btrfs_release_path(path);
2721			if (root == root->fs_info->chunk_root)
2722				btrfs_trans_release_chunk_metadata(trans);
2723			if (ret > 0)
2724				ret = 0;
2725		}
2726		if (!ret)
2727			update_processed_blocks(rc, node);
2728	} else {
2729		ret = do_relocation(trans, rc, node, key, path, 1);
2730	}
2731out:
2732	if (ret || node->level == 0 || node->cowonly)
2733		btrfs_backref_cleanup_node(&rc->backref_cache, node);
2734	return ret;
2735}
2736
2737/*
2738 * relocate a list of blocks
2739 */
2740static noinline_for_stack
2741int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2742			 struct reloc_control *rc, struct rb_root *blocks)
2743{
2744	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2745	struct btrfs_backref_node *node;
2746	struct btrfs_path *path;
2747	struct tree_block *block;
2748	struct tree_block *next;
2749	int ret;
2750	int err = 0;
2751
2752	path = btrfs_alloc_path();
2753	if (!path) {
2754		err = -ENOMEM;
2755		goto out_free_blocks;
2756	}
2757
2758	/* Kick in readahead for tree blocks with missing keys */
2759	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2760		if (!block->key_ready)
2761			btrfs_readahead_tree_block(fs_info, block->bytenr,
2762						   block->owner, 0,
2763						   block->level);
2764	}
2765
2766	/* Get first keys */
2767	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2768		if (!block->key_ready) {
2769			err = get_tree_block_key(fs_info, block);
2770			if (err)
2771				goto out_free_path;
2772		}
2773	}
2774
2775	/* Do tree relocation */
2776	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2777		node = build_backref_tree(rc, &block->key,
2778					  block->level, block->bytenr);
2779		if (IS_ERR(node)) {
2780			err = PTR_ERR(node);
2781			goto out;
2782		}
2783
2784		ret = relocate_tree_block(trans, rc, node, &block->key,
2785					  path);
2786		if (ret < 0) {
2787			err = ret;
2788			break;
2789		}
2790	}
2791out:
2792	err = finish_pending_nodes(trans, rc, path, err);
2793
2794out_free_path:
2795	btrfs_free_path(path);
2796out_free_blocks:
2797	free_block_list(blocks);
2798	return err;
2799}
2800
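/*
 * Preallocate file extents in the data reloc inode covering the whole
 * cluster, one range per cluster boundary, after reserving the needed data
 * space.  For subpage, a stale partially uptodate page beyond i_size is
 * invalidated first so padding zeros are not written back as data.
 */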
2801static noinline_for_stack int prealloc_file_extent_cluster(
2802				struct btrfs_inode *inode,
2803				struct file_extent_cluster *cluster)
2804{
2805	u64 alloc_hint = 0;
2806	u64 start;
2807	u64 end;
2808	u64 offset = inode->index_cnt;
2809	u64 num_bytes;
2810	int nr;
2811	int ret = 0;
2812	u64 i_size = i_size_read(&inode->vfs_inode);
2813	u64 prealloc_start = cluster->start - offset;
2814	u64 prealloc_end = cluster->end - offset;
2815	u64 cur_offset = prealloc_start;
2816
2817	/*
2818	 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
2819	 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
2820	 * btrfs_do_readpage() call of previously relocated file cluster.
2821	 *
2822	 * If the current cluster starts in the above range, btrfs_do_readpage()
2823	 * will skip the read, and relocate_one_page() will later writeback
2824	 * the padding zeros as new data, causing data corruption.
2825	 *
2826	 * Here we have to manually invalidate the range [i_size, PAGE_END + 1).
2827	 */
2828	if (!IS_ALIGNED(i_size, PAGE_SIZE)) {
2829		struct address_space *mapping = inode->vfs_inode.i_mapping;
2830		struct btrfs_fs_info *fs_info = inode->root->fs_info;
2831		const u32 sectorsize = fs_info->sectorsize;
2832		struct page *page;
2833
2834		ASSERT(sectorsize < PAGE_SIZE);
2835		ASSERT(IS_ALIGNED(i_size, sectorsize));
2836
2837		/*
2838		 * Subpage can't handle page with DIRTY but without UPTODATE
2839		 * bit as it can lead to the following deadlock:
2840		 *
2841		 * btrfs_read_folio()
2842		 * | Page already *locked*
2843		 * |- btrfs_lock_and_flush_ordered_range()
2844		 *    |- btrfs_start_ordered_extent()
2845		 *       |- extent_write_cache_pages()
2846		 *          |- lock_page()
2847		 *             We try to lock the page we already hold.
2848		 *
2849		 * Here we just write back the whole data reloc inode, so that
2850		 * we are ensured there is no dirty range in the page and it
2851		 * is safe to clear the uptodate bits.
2852		 *
2853		 * This shouldn't cause too much overhead, as we need to write
2854		 * the data back anyway.
2855		 */
2856		ret = filemap_write_and_wait(mapping);
2857		if (ret < 0)
2858			return ret;
2859
2860		clear_extent_bits(&inode->io_tree, i_size,
2861				  round_up(i_size, PAGE_SIZE) - 1,
2862				  EXTENT_UPTODATE);
2863		page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
2864		/*
2865		 * If the page is freed we don't need to do anything, as we
2866		 * will re-read the whole page anyway.
2867		 */
2868		if (page) {
2869			btrfs_subpage_clear_uptodate(fs_info, page, i_size,
2870					round_up(i_size, PAGE_SIZE) - i_size);
2871			unlock_page(page);
2872			put_page(page);
2873		}
2874	}
2875
2876	BUG_ON(cluster->start != cluster->boundary[0]);
2877	ret = btrfs_alloc_data_chunk_ondemand(inode,
2878					      prealloc_end + 1 - prealloc_start);
2879	if (ret)
2880		return ret;
2881
2882	btrfs_inode_lock(inode, 0);
2883	for (nr = 0; nr < cluster->nr; nr++) {
2884		struct extent_state *cached_state = NULL;
2885
2886		start = cluster->boundary[nr] - offset;
2887		if (nr + 1 < cluster->nr)
2888			end = cluster->boundary[nr + 1] - 1 - offset;
2889		else
2890			end = cluster->end - offset;
2891
2892		lock_extent(&inode->io_tree, start, end, &cached_state);
2893		num_bytes = end + 1 - start;
2894		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2895						num_bytes, num_bytes,
2896						end + 1, &alloc_hint);
2897		cur_offset = end + 1;
2898		unlock_extent(&inode->io_tree, start, end, &cached_state);
2899		if (ret)
2900			break;
2901	}
2902	btrfs_inode_unlock(inode, 0);
2903
2904	if (cur_offset < prealloc_end)
2905		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2906					       prealloc_end + 1 - cur_offset);
2907	return ret;
2908}
2909
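/*
 * Insert a pinned extent map for the file range [start, end] of the data
 * reloc inode, pointing at @block_start, so that reads of the range are
 * served from the data extent being relocated.
 */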
2910static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2911				u64 start, u64 end, u64 block_start)
2912{
2913	struct extent_map *em;
2914	struct extent_state *cached_state = NULL;
2915	int ret = 0;
2916
2917	em = alloc_extent_map();
2918	if (!em)
2919		return -ENOMEM;
2920
2921	em->start = start;
2922	em->len = end + 1 - start;
2923	em->block_len = em->len;
2924	em->block_start = block_start;
2925	set_bit(EXTENT_FLAG_PINNED, &em->flags);
2926
2927	lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2928	ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2929	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2930	free_extent_map(em);
2931
2932	return ret;
2933}
2934
2935/*
2936 * Allow error injection to test balance/relocation cancellation
2937 */
2938noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2939{
2940	return atomic_read(&fs_info->balance_cancel_req) ||
2941		atomic_read(&fs_info->reloc_cancel_req) ||
2942		fatal_signal_pending(current);
2943}
2944ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2945
2946static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
2947				    int cluster_nr)
2948{
2949	/* Last extent, use cluster end directly */
2950	if (cluster_nr >= cluster->nr - 1)
2951		return cluster->end;
2952
2953	/* Use next boundary start */
2954	return cluster->boundary[cluster_nr + 1] - 1;
2955}
2956
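/*
 * Relocate the data covered by one page of the data reloc inode: read the
 * page, reserve metadata and mark each cluster range in it delalloc/dirty,
 * and set EXTENT_BOUNDARY at cluster boundaries so that writeback produces
 * extents of the same size as the source.
 */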
2957static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
2958			     struct file_extent_cluster *cluster,
2959			     int *cluster_nr, unsigned long page_index)
2960{
2961	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2962	u64 offset = BTRFS_I(inode)->index_cnt;
2963	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
2964	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2965	struct page *page;
2966	u64 page_start;
2967	u64 page_end;
2968	u64 cur;
2969	int ret;
2970
2971	ASSERT(page_index <= last_index);
2972	page = find_lock_page(inode->i_mapping, page_index);
2973	if (!page) {
2974		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
2975				page_index, last_index + 1 - page_index);
2976		page = find_or_create_page(inode->i_mapping, page_index, mask);
2977		if (!page)
2978			return -ENOMEM;
2979	}
2980	ret = set_page_extent_mapped(page);
2981	if (ret < 0)
2982		goto release_page;
2983
2984	if (PageReadahead(page))
2985		page_cache_async_readahead(inode->i_mapping, ra, NULL,
2986				page_folio(page), page_index,
2987				last_index + 1 - page_index);
2988
2989	if (!PageUptodate(page)) {
2990		btrfs_read_folio(NULL, page_folio(page));
2991		lock_page(page);
2992		if (!PageUptodate(page)) {
2993			ret = -EIO;
2994			goto release_page;
2995		}
2996	}
2997
2998	page_start = page_offset(page);
2999	page_end = page_start + PAGE_SIZE - 1;
3000
3001	/*
3002	 * Start from the cluster boundary, as for the subpage case the
3003	 * cluster can start inside the page.
3004	 */
3005	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
3006	while (cur <= page_end) {
3007		struct extent_state *cached_state = NULL;
3008		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
3009		u64 extent_end = get_cluster_boundary_end(cluster,
3010						*cluster_nr) - offset;
3011		u64 clamped_start = max(page_start, extent_start);
3012		u64 clamped_end = min(page_end, extent_end);
3013		u32 clamped_len = clamped_end + 1 - clamped_start;
3014
3015		/* Reserve metadata for this range */
3016		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3017						      clamped_len, clamped_len,
3018						      false);
3019		if (ret)
3020			goto release_page;
3021
3022		/* Mark the range delalloc and dirty for later writeback */
3023		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3024			    &cached_state);
3025		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
3026						clamped_end, 0, &cached_state);
3027		if (ret) {
3028			clear_extent_bit(&BTRFS_I(inode)->io_tree,
3029					 clamped_start, clamped_end,
3030					 EXTENT_LOCKED | EXTENT_BOUNDARY,
3031					 &cached_state);
3032			btrfs_delalloc_release_metadata(BTRFS_I(inode),
3033							clamped_len, true);
3034			btrfs_delalloc_release_extents(BTRFS_I(inode),
3035						       clamped_len);
3036			goto release_page;
3037		}
3038		btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
3039
3040		/*
3041		 * Set the boundary if it's inside the page.
3042		 * Data relocation requires the destination extents to have the
3043		 * same size as the source.
3044		 * The EXTENT_BOUNDARY bit prevents the current extent from
3045		 * being merged with the previous extent.
3046		 */
3047		if (in_range(cluster->boundary[*cluster_nr] - offset,
3048			     page_start, PAGE_SIZE)) {
3049			u64 boundary_start = cluster->boundary[*cluster_nr] -
3050						offset;
3051			u64 boundary_end = boundary_start +
3052					   fs_info->sectorsize - 1;
3053
3054			set_extent_bits(&BTRFS_I(inode)->io_tree,
3055					boundary_start, boundary_end,
3056					EXTENT_BOUNDARY);
3057		}
3058		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
3059			      &cached_state);
3060		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
3061		cur += clamped_len;
3062
3063		/* Crossed extent end, go to next extent */
3064		if (cur >= extent_end) {
3065			(*cluster_nr)++;
3066			/* Just finished the last extent of the cluster, exit. */
3067			if (*cluster_nr >= cluster->nr)
3068				break;
3069		}
3070	}
3071	unlock_page(page);
3072	put_page(page);
3073
3074	balance_dirty_pages_ratelimited(inode->i_mapping);
3075	btrfs_throttle(fs_info);
3076	if (btrfs_should_cancel_balance(fs_info))
3077		ret = -ECANCELED;
3078	return ret;
3079
3080release_page:
3081	unlock_page(page);
3082	put_page(page);
3083	return ret;
3084}
3085
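/*
 * Relocate one cluster of data extents: preallocate the destination space,
 * map the file range to the old data location, then read and dirty every
 * page so that writeback copies the data into the preallocated extents.
 */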
3086static int relocate_file_extent_cluster(struct inode *inode,
3087					struct file_extent_cluster *cluster)
3088{
3089	u64 offset = BTRFS_I(inode)->index_cnt;
3090	unsigned long index;
3091	unsigned long last_index;
3092	struct file_ra_state *ra;
3093	int cluster_nr = 0;
3094	int ret = 0;
3095
3096	if (!cluster->nr)
3097		return 0;
3098
3099	ra = kzalloc(sizeof(*ra), GFP_NOFS);
3100	if (!ra)
3101		return -ENOMEM;
3102
3103	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3104	if (ret)
3105		goto out;
3106
3107	file_ra_state_init(ra, inode->i_mapping);
3108
3109	ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3110				   cluster->end - offset, cluster->start);
3111	if (ret)
3112		goto out;
3113
3114	last_index = (cluster->end - offset) >> PAGE_SHIFT;
3115	for (index = (cluster->start - offset) >> PAGE_SHIFT;
3116	     index <= last_index && !ret; index++)
3117		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3118	if (ret == 0)
3119		WARN_ON(cluster_nr != cluster->nr);
3120out:
3121	kfree(ra);
3122	return ret;
3123}
3124
3125static noinline_for_stack
3126int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3127			 struct file_extent_cluster *cluster)
3128{
3129	int ret;
3130
3131	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3132		ret = relocate_file_extent_cluster(inode, cluster);
3133		if (ret)
3134			return ret;
3135		cluster->nr = 0;
3136	}
3137
3138	if (!cluster->nr)
3139		cluster->start = extent_key->objectid;
3140	else
3141		BUG_ON(cluster->nr >= MAX_EXTENTS);
3142	cluster->end = extent_key->objectid + extent_key->offset - 1;
3143	cluster->boundary[cluster->nr] = extent_key->objectid;
3144	cluster->nr++;
3145
3146	if (cluster->nr >= MAX_EXTENTS) {
3147		ret = relocate_file_extent_cluster(inode, cluster);
3148		if (ret)
3149			return ret;
3150		cluster->nr = 0;
3151	}
3152	return 0;
3153}
3154
3155/*
3156 * helper to add a tree block to the list.
3157 * the major work is getting the generation and level of the block
3158 */
3159static int add_tree_block(struct reloc_control *rc,
3160			  struct btrfs_key *extent_key,
3161			  struct btrfs_path *path,
3162			  struct rb_root *blocks)
3163{
3164	struct extent_buffer *eb;
3165	struct btrfs_extent_item *ei;
3166	struct btrfs_tree_block_info *bi;
3167	struct tree_block *block;
3168	struct rb_node *rb_node;
3169	u32 item_size;
3170	int level = -1;
3171	u64 generation;
3172	u64 owner = 0;
3173
3174	eb =  path->nodes[0];
3175	item_size = btrfs_item_size(eb, path->slots[0]);
3176
3177	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3178	    item_size >= sizeof(*ei) + sizeof(*bi)) {
3179		unsigned long ptr = 0, end;
3180
3181		ei = btrfs_item_ptr(eb, path->slots[0],
3182				struct btrfs_extent_item);
3183		end = (unsigned long)ei + item_size;
3184		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3185			bi = (struct btrfs_tree_block_info *)(ei + 1);
3186			level = btrfs_tree_block_level(eb, bi);
3187			ptr = (unsigned long)(bi + 1);
3188		} else {
3189			level = (int)extent_key->offset;
3190			ptr = (unsigned long)(ei + 1);
3191		}
3192		generation = btrfs_extent_generation(eb, ei);
3193
3194		/*
3195		 * We're reading random blocks without knowing their owner ahead
3196		 * of time.  This is ok most of the time, as all reloc roots and
3197		 * fs roots have the same lock type.  However normal trees do
3198		 * not, and the only way to know ahead of time is to read the
3199		 * inline ref offset.  We know it's an fs root if
3200		 *
3201		 * 1. There's more than one ref.
3202		 * 2. There's a SHARED_DATA_REF_KEY set.
3203		 * 3. FULL_BACKREF is set on the flags.
3204		 *
3205		 * Otherwise it's safe to assume that the ref offset == the
3206		 * owner of this block, so we can use that when calling
3207		 * read_tree_block.
3208		 */
3209		if (btrfs_extent_refs(eb, ei) == 1 &&
3210		    !(btrfs_extent_flags(eb, ei) &
3211		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3212		    ptr < end) {
3213			struct btrfs_extent_inline_ref *iref;
3214			int type;
3215
3216			iref = (struct btrfs_extent_inline_ref *)ptr;
3217			type = btrfs_get_extent_inline_ref_type(eb, iref,
3218							BTRFS_REF_TYPE_BLOCK);
3219			if (type == BTRFS_REF_TYPE_INVALID)
3220				return -EINVAL;
3221			if (type == BTRFS_TREE_BLOCK_REF_KEY)
3222				owner = btrfs_extent_inline_ref_offset(eb, iref);
3223		}
3224	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3225		btrfs_print_v0_err(eb->fs_info);
3226		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3227		return -EINVAL;
3228	} else {
3229		BUG();
3230	}
3231
3232	btrfs_release_path(path);
3233
3234	BUG_ON(level == -1);
3235
3236	block = kmalloc(sizeof(*block), GFP_NOFS);
3237	if (!block)
3238		return -ENOMEM;
3239
3240	block->bytenr = extent_key->objectid;
3241	block->key.objectid = rc->extent_root->fs_info->nodesize;
3242	block->key.offset = generation;
3243	block->level = level;
3244	block->key_ready = 0;
3245	block->owner = owner;
3246
3247	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3248	if (rb_node)
3249		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3250				    -EEXIST);
3251
3252	return 0;
3253}
3254
3255/*
3256 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3257 */
3258static int __add_tree_block(struct reloc_control *rc,
3259			    u64 bytenr, u32 blocksize,
3260			    struct rb_root *blocks)
3261{
3262	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3263	struct btrfs_path *path;
3264	struct btrfs_key key;
3265	int ret;
3266	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3267
3268	if (tree_block_processed(bytenr, rc))
3269		return 0;
3270
3271	if (rb_simple_search(blocks, bytenr))
3272		return 0;
3273
3274	path = btrfs_alloc_path();
3275	if (!path)
3276		return -ENOMEM;
3277again:
3278	key.objectid = bytenr;
3279	if (skinny) {
3280		key.type = BTRFS_METADATA_ITEM_KEY;
3281		key.offset = (u64)-1;
3282	} else {
3283		key.type = BTRFS_EXTENT_ITEM_KEY;
3284		key.offset = blocksize;
3285	}
3286
3287	path->search_commit_root = 1;
3288	path->skip_locking = 1;
3289	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3290	if (ret < 0)
3291		goto out;
3292
3293	if (ret > 0 && skinny) {
3294		if (path->slots[0]) {
3295			path->slots[0]--;
3296			btrfs_item_key_to_cpu(path->nodes[0], &key,
3297					      path->slots[0]);
3298			if (key.objectid == bytenr &&
3299			    (key.type == BTRFS_METADATA_ITEM_KEY ||
3300			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
3301			      key.offset == blocksize)))
3302				ret = 0;
3303		}
3304
3305		if (ret) {
3306			skinny = false;
3307			btrfs_release_path(path);
3308			goto again;
3309		}
3310	}
3311	if (ret) {
3312		ASSERT(ret == 1);
3313		btrfs_print_leaf(path->nodes[0]);
3314		btrfs_err(fs_info,
3315	     "tree block extent item (%llu) is not found in extent tree",
3316		     bytenr);
3317		WARN_ON(1);
3318		ret = -EINVAL;
3319		goto out;
3320	}
3321
3322	ret = add_tree_block(rc, &key, path, blocks);
3323out:
3324	btrfs_free_path(path);
3325	return ret;
3326}
3327
3328static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3329				    struct btrfs_block_group *block_group,
3330				    struct inode *inode,
3331				    u64 ino)
3332{
3333	struct btrfs_root *root = fs_info->tree_root;
3334	struct btrfs_trans_handle *trans;
3335	int ret = 0;
3336
3337	if (inode)
3338		goto truncate;
3339
3340	inode = btrfs_iget(fs_info->sb, ino, root);
3341	if (IS_ERR(inode))
3342		return -ENOENT;
3343
3344truncate:
3345	ret = btrfs_check_trunc_cache_free_space(fs_info,
3346						 &fs_info->global_block_rsv);
3347	if (ret)
3348		goto out;
3349
3350	trans = btrfs_join_transaction(root);
3351	if (IS_ERR(trans)) {
3352		ret = PTR_ERR(trans);
3353		goto out;
3354	}
3355
3356	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3357
3358	btrfs_end_transaction(trans);
3359	btrfs_btree_balance_dirty(fs_info);
3360out:
3361	iput(inode);
3362	return ret;
3363}
3364
3365/*
3366 * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
3367 * cache inode, to avoid free space cache data extent blocking data relocation.
3368 */
3369static int delete_v1_space_cache(struct extent_buffer *leaf,
3370				 struct btrfs_block_group *block_group,
3371				 u64 data_bytenr)
3372{
3373	u64 space_cache_ino;
3374	struct btrfs_file_extent_item *ei;
3375	struct btrfs_key key;
3376	bool found = false;
3377	int i;
3378	int ret;
3379
3380	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3381		return 0;
3382
3383	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3384		u8 type;
3385
3386		btrfs_item_key_to_cpu(leaf, &key, i);
3387		if (key.type != BTRFS_EXTENT_DATA_KEY)
3388			continue;
3389		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3390		type = btrfs_file_extent_type(leaf, ei);
3391
3392		if ((type == BTRFS_FILE_EXTENT_REG ||
3393		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
3394		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3395			found = true;
3396			space_cache_ino = key.objectid;
3397			break;
3398		}
3399	}
3400	if (!found)
3401		return -ENOENT;
3402	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3403					space_cache_ino);
3404	return ret;
3405}
3406
3407/*
3408 * helper to find all tree blocks that reference a given data extent
3409 */
3410static noinline_for_stack
3411int add_data_references(struct reloc_control *rc,
3412			struct btrfs_key *extent_key,
3413			struct btrfs_path *path,
3414			struct rb_root *blocks)
3415{
3416	struct btrfs_backref_walk_ctx ctx = { 0 };
3417	struct ulist_iterator leaf_uiter;
3418	struct ulist_node *ref_node = NULL;
3419	const u32 blocksize = rc->extent_root->fs_info->nodesize;
3420	int ret = 0;
3421
3422	btrfs_release_path(path);
3423
3424	ctx.bytenr = extent_key->objectid;
3425	ctx.ignore_extent_item_pos = true;
3426	ctx.fs_info = rc->extent_root->fs_info;
3427
3428	ret = btrfs_find_all_leafs(&ctx);
3429	if (ret < 0)
3430		return ret;
3431
3432	ULIST_ITER_INIT(&leaf_uiter);
3433	while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
3434		struct btrfs_tree_parent_check check = { 0 };
3435		struct extent_buffer *eb;
3436
3437		eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
3438		if (IS_ERR(eb)) {
3439			ret = PTR_ERR(eb);
3440			break;
3441		}
3442		ret = delete_v1_space_cache(eb, rc->block_group,
3443					    extent_key->objectid);
3444		free_extent_buffer(eb);
3445		if (ret < 0)
3446			break;
3447		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3448		if (ret < 0)
3449			break;
3450	}
3451	if (ret < 0)
3452		free_block_list(blocks);
3453	ulist_free(ctx.refs);
3454	return ret;
3455}
3456
3457/*
3458 * helper to find next unprocessed extent
3459 */
3460static noinline_for_stack
3461int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3462		     struct btrfs_key *extent_key)
3463{
3464	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3465	struct btrfs_key key;
3466	struct extent_buffer *leaf;
3467	u64 start, end, last;
3468	int ret;
3469
3470	last = rc->block_group->start + rc->block_group->length;
3471	while (1) {
3472		cond_resched();
3473		if (rc->search_start >= last) {
3474			ret = 1;
3475			break;
3476		}
3477
3478		key.objectid = rc->search_start;
3479		key.type = BTRFS_EXTENT_ITEM_KEY;
3480		key.offset = 0;
3481
3482		path->search_commit_root = 1;
3483		path->skip_locking = 1;
3484		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3485					0, 0);
3486		if (ret < 0)
3487			break;
3488next:
3489		leaf = path->nodes[0];
3490		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3491			ret = btrfs_next_leaf(rc->extent_root, path);
3492			if (ret != 0)
3493				break;
3494			leaf = path->nodes[0];
3495		}
3496
3497		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3498		if (key.objectid >= last) {
3499			ret = 1;
3500			break;
3501		}
3502
3503		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3504		    key.type != BTRFS_METADATA_ITEM_KEY) {
3505			path->slots[0]++;
3506			goto next;
3507		}
3508
3509		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3510		    key.objectid + key.offset <= rc->search_start) {
3511			path->slots[0]++;
3512			goto next;
3513		}
3514
3515		if (key.type == BTRFS_METADATA_ITEM_KEY &&
3516		    key.objectid + fs_info->nodesize <=
3517		    rc->search_start) {
3518			path->slots[0]++;
3519			goto next;
3520		}
3521
3522		ret = find_first_extent_bit(&rc->processed_blocks,
3523					    key.objectid, &start, &end,
3524					    EXTENT_DIRTY, NULL);
3525
3526		if (ret == 0 && start <= key.objectid) {
3527			btrfs_release_path(path);
3528			rc->search_start = end + 1;
3529		} else {
3530			if (key.type == BTRFS_EXTENT_ITEM_KEY)
3531				rc->search_start = key.objectid + key.offset;
3532			else
3533				rc->search_start = key.objectid +
3534					fs_info->nodesize;
3535			memcpy(extent_key, &key, sizeof(key));
3536			return 0;
3537		}
3538	}
3539	btrfs_release_path(path);
3540	return ret;
3541}
3542
3543static void set_reloc_control(struct reloc_control *rc)
3544{
3545	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3546
3547	mutex_lock(&fs_info->reloc_mutex);
3548	fs_info->reloc_ctl = rc;
3549	mutex_unlock(&fs_info->reloc_mutex);
3550}
3551
3552static void unset_reloc_control(struct reloc_control *rc)
3553{
3554	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3555
3556	mutex_lock(&fs_info->reloc_mutex);
3557	fs_info->reloc_ctl = NULL;
3558	mutex_unlock(&fs_info->reloc_mutex);
3559}
3560
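/*
 * Set up the relocation control before the main loop: allocate and fill the
 * block reserve, set create_reloc_tree and install rc as fs_info->reloc_ctl,
 * then commit a transaction before relocation of the block group starts.
 */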
3561static noinline_for_stack
3562int prepare_to_relocate(struct reloc_control *rc)
3563{
3564	struct btrfs_trans_handle *trans;
3565	int ret;
3566
3567	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3568					      BTRFS_BLOCK_RSV_TEMP);
3569	if (!rc->block_rsv)
3570		return -ENOMEM;
3571
3572	memset(&rc->cluster, 0, sizeof(rc->cluster));
3573	rc->search_start = rc->block_group->start;
3574	rc->extents_found = 0;
3575	rc->nodes_relocated = 0;
3576	rc->merging_rsv_size = 0;
3577	rc->reserved_bytes = 0;
3578	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3579			      RELOCATION_RESERVED_NODES;
3580	ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3581				     rc->block_rsv, rc->block_rsv->size,
3582				     BTRFS_RESERVE_FLUSH_ALL);
3583	if (ret)
3584		return ret;
3585
3586	rc->create_reloc_tree = 1;
3587	set_reloc_control(rc);
3588
3589	trans = btrfs_join_transaction(rc->extent_root);
3590	if (IS_ERR(trans)) {
3591		unset_reloc_control(rc);
3592		/*
3593		 * extent tree is not a ref_cow tree and has no reloc_root to
3594		 * cleanup.  And callers are responsible for freeing the above
3595		 * block rsv.
3596		 */
3597		return PTR_ERR(trans);
3598	}
3599
3600	ret = btrfs_commit_transaction(trans);
3601	if (ret)
3602		unset_reloc_control(rc);
3603
3604	return ret;
3605}
3606
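/*
 * The main relocation loop: walk the extent tree for extents belonging to
 * the target block group.  Tree blocks are relocated through the reloc
 * trees, data extents are grouped into clusters and copied through the data
 * reloc inode; at the end the reloc trees are merged back and cleaned up.
 */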
3607static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3608{
3609	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3610	struct rb_root blocks = RB_ROOT;
3611	struct btrfs_key key;
3612	struct btrfs_trans_handle *trans = NULL;
3613	struct btrfs_path *path;
3614	struct btrfs_extent_item *ei;
3615	u64 flags;
3616	int ret;
3617	int err = 0;
3618	int progress = 0;
3619
3620	path = btrfs_alloc_path();
3621	if (!path)
3622		return -ENOMEM;
3623	path->reada = READA_FORWARD;
3624
3625	ret = prepare_to_relocate(rc);
3626	if (ret) {
3627		err = ret;
3628		goto out_free;
3629	}
3630
3631	while (1) {
3632		rc->reserved_bytes = 0;
3633		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
3634					     rc->block_rsv->size,
3635					     BTRFS_RESERVE_FLUSH_ALL);
3636		if (ret) {
3637			err = ret;
3638			break;
3639		}
3640		progress++;
3641		trans = btrfs_start_transaction(rc->extent_root, 0);
3642		if (IS_ERR(trans)) {
3643			err = PTR_ERR(trans);
3644			trans = NULL;
3645			break;
3646		}
3647restart:
3648		if (update_backref_cache(trans, &rc->backref_cache)) {
3649			btrfs_end_transaction(trans);
3650			trans = NULL;
3651			continue;
3652		}
3653
3654		ret = find_next_extent(rc, path, &key);
3655		if (ret < 0)
3656			err = ret;
3657		if (ret != 0)
3658			break;
3659
3660		rc->extents_found++;
3661
3662		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3663				    struct btrfs_extent_item);
3664		flags = btrfs_extent_flags(path->nodes[0], ei);
3665
3666		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3667			ret = add_tree_block(rc, &key, path, &blocks);
3668		} else if (rc->stage == UPDATE_DATA_PTRS &&
3669			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
3670			ret = add_data_references(rc, &key, path, &blocks);
3671		} else {
3672			btrfs_release_path(path);
3673			ret = 0;
3674		}
3675		if (ret < 0) {
3676			err = ret;
3677			break;
3678		}
3679
3680		if (!RB_EMPTY_ROOT(&blocks)) {
3681			ret = relocate_tree_blocks(trans, rc, &blocks);
3682			if (ret < 0) {
3683				if (ret != -EAGAIN) {
3684					err = ret;
3685					break;
3686				}
3687				rc->extents_found--;
3688				rc->search_start = key.objectid;
3689			}
3690		}
3691
3692		btrfs_end_transaction_throttle(trans);
3693		btrfs_btree_balance_dirty(fs_info);
3694		trans = NULL;
3695
3696		if (rc->stage == MOVE_DATA_EXTENTS &&
3697		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3698			rc->found_file_extent = 1;
3699			ret = relocate_data_extent(rc->data_inode,
3700						   &key, &rc->cluster);
3701			if (ret < 0) {
3702				err = ret;
3703				break;
3704			}
3705		}
3706		if (btrfs_should_cancel_balance(fs_info)) {
3707			err = -ECANCELED;
3708			break;
3709		}
3710	}
3711	if (trans && progress && err == -ENOSPC) {
3712		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3713		if (ret == 1) {
3714			err = 0;
3715			progress = 0;
3716			goto restart;
3717		}
3718	}
3719
3720	btrfs_release_path(path);
3721	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3722
3723	if (trans) {
3724		btrfs_end_transaction_throttle(trans);
3725		btrfs_btree_balance_dirty(fs_info);
3726	}
3727
3728	if (!err) {
3729		ret = relocate_file_extent_cluster(rc->data_inode,
3730						   &rc->cluster);
3731		if (ret < 0)
3732			err = ret;
3733	}
3734
3735	rc->create_reloc_tree = 0;
3736	set_reloc_control(rc);
3737
3738	btrfs_backref_release_cache(&rc->backref_cache);
3739	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3740
3741	/*
3742	 * Even in the case when the relocation is cancelled, we should still go
3743	 * through prepare_to_merge() and merge_reloc_roots().
3744	 *
3745	 * For error (including cancelled balance), prepare_to_merge() will
3746	 * mark all reloc trees orphan, then queue them for cleanup in
3747	 * merge_reloc_roots()
3748	 */
3749	err = prepare_to_merge(rc, err);
3750
3751	merge_reloc_roots(rc);
3752
3753	rc->merge_reloc_tree = 0;
3754	unset_reloc_control(rc);
3755	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3756
3757	/* get rid of pinned extents */
3758	trans = btrfs_join_transaction(rc->extent_root);
3759	if (IS_ERR(trans)) {
3760		err = PTR_ERR(trans);
3761		goto out_free;
3762	}
3763	ret = btrfs_commit_transaction(trans);
3764	if (ret && !err)
3765		err = ret;
3766out_free:
3767	ret = clean_dirty_subvols(rc);
3768	if (ret < 0 && !err)
3769		err = ret;
3770	btrfs_free_block_rsv(fs_info, rc->block_rsv);
3771	btrfs_free_path(path);
3772	return err;
3773}
3774
3775static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3776				 struct btrfs_root *root, u64 objectid)
3777{
3778	struct btrfs_path *path;
3779	struct btrfs_inode_item *item;
3780	struct extent_buffer *leaf;
3781	int ret;
3782
3783	path = btrfs_alloc_path();
3784	if (!path)
3785		return -ENOMEM;
3786
3787	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3788	if (ret)
3789		goto out;
3790
3791	leaf = path->nodes[0];
3792	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3793	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3794	btrfs_set_inode_generation(leaf, item, 1);
3795	btrfs_set_inode_size(leaf, item, 0);
3796	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3797	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3798					  BTRFS_INODE_PREALLOC);
3799	btrfs_mark_buffer_dirty(leaf);
3800out:
3801	btrfs_free_path(path);
3802	return ret;
3803}
3804
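/*
 * Remove the inode item created by __insert_orphan_inode().
 *
 * Only used when grabbing the in-memory inode fails right after the item
 * was inserted; any error here aborts the transaction.
 */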
3805static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3806				struct btrfs_root *root, u64 objectid)
3807{
3808	struct btrfs_path *path;
3809	struct btrfs_key key;
3810	int ret = 0;
3811
3812	path = btrfs_alloc_path();
3813	if (!path) {
3814		ret = -ENOMEM;
3815		goto out;
3816	}
3817
3818	key.objectid = objectid;
3819	key.type = BTRFS_INODE_ITEM_KEY;
3820	key.offset = 0;
3821	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3822	if (ret) {
3823		if (ret > 0)
3824			ret = -ENOENT;
3825		goto out;
3826	}
3827	ret = btrfs_del_item(trans, root, path);
3828out:
3829	if (ret)
3830		btrfs_abort_transaction(trans, ret);
3831	btrfs_free_path(path);
3832}
3833
3834/*
3835 * helper to create an inode for data relocation.
3836 * the inode is in the data relocation tree and its link count is 0
3837 */
3838static noinline_for_stack
3839struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3840				 struct btrfs_block_group *group)
3841{
3842	struct inode *inode = NULL;
3843	struct btrfs_trans_handle *trans;
3844	struct btrfs_root *root;
3845	u64 objectid;
3846	int err = 0;
3847
3848	root = btrfs_grab_root(fs_info->data_reloc_root);
3849	trans = btrfs_start_transaction(root, 6);
3850	if (IS_ERR(trans)) {
3851		btrfs_put_root(root);
3852		return ERR_CAST(trans);
3853	}
3854
3855	err = btrfs_get_free_objectid(root, &objectid);
3856	if (err)
3857		goto out;
3858
3859	err = __insert_orphan_inode(trans, root, objectid);
3860	if (err)
3861		goto out;
3862
3863	inode = btrfs_iget(fs_info->sb, objectid, root);
3864	if (IS_ERR(inode)) {
3865		delete_orphan_inode(trans, root, objectid);
3866		err = PTR_ERR(inode);
3867		inode = NULL;
3868		goto out;
3869	}
3870	BTRFS_I(inode)->index_cnt = group->start;
3871
3872	err = btrfs_orphan_add(trans, BTRFS_I(inode));
3873out:
3874	btrfs_put_root(root);
3875	btrfs_end_transaction(trans);
3876	btrfs_btree_balance_dirty(fs_info);
3877	if (err) {
3878		iput(inode);
3879		inode = ERR_PTR(err);
3880	}
3881	return inode;
3882}
3883
3884/*
3885 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3886 * has been requested meanwhile and don't start in that case.
3887 *
3888 * Return:
3889 *   0             success
3890 *   -EINPROGRESS  operation is already in progress, that's probably a bug
3891 *   -ECANCELED    cancellation request was set before the operation started
3892 */
3893static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3894{
3895	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3896		/* This should not happen */
3897		btrfs_err(fs_info, "reloc already running, cannot start");
3898		return -EINPROGRESS;
3899	}
3900
3901	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3902		btrfs_info(fs_info, "chunk relocation canceled on start");
3903		/*
3904		 * On cancel, clear all requests but let the caller mark
3905		 * the end after cleanup operations.
3906		 */
3907		atomic_set(&fs_info->reloc_cancel_req, 0);
3908		return -ECANCELED;
3909	}
3910	return 0;
3911}
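
/*
 * reloc_chunk_start() is paired with reloc_chunk_end() below: both
 * btrfs_relocate_block_group() and btrfs_recover_relocation() call
 * reloc_chunk_end() on their exit paths once cleanup is done.
 */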
3912
3913/*
3914 * Mark end of chunk relocation that is cancellable and wake any waiters.
3915 */
3916static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3917{
3918	/* Requested after start, clear bit first so any waiters can continue */
3919	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3920		btrfs_info(fs_info, "chunk relocation canceled during operation");
3921	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3922	atomic_set(&fs_info->reloc_cancel_req, 0);
3923}
3924
3925static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3926{
3927	struct reloc_control *rc;
3928
3929	rc = kzalloc(sizeof(*rc), GFP_NOFS);
3930	if (!rc)
3931		return NULL;
3932
3933	INIT_LIST_HEAD(&rc->reloc_roots);
3934	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3935	btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3936	mapping_tree_init(&rc->reloc_root_tree);
3937	extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3938	return rc;
3939}
3940
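/*
 * Tear down a reloc_control: drop the reloc roots still queued on
 * rc->reloc_roots and free the mapping nodes left in the reloc root tree.
 */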
3941static void free_reloc_control(struct reloc_control *rc)
3942{
3943	struct mapping_node *node, *tmp;
3944
3945	free_reloc_roots(&rc->reloc_roots);
3946	rbtree_postorder_for_each_entry_safe(node, tmp,
3947			&rc->reloc_root_tree.rb_root, rb_node)
3948		kfree(node);
3949
3950	kfree(rc);
3951}
3952
3953/*
3954 * Print the block group being relocated
3955 */
3956static void describe_relocation(struct btrfs_fs_info *fs_info,
3957				struct btrfs_block_group *block_group)
3958{
3959	char buf[128] = {'\0'};
3960
3961	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3962
3963	btrfs_info(fs_info,
3964		   "relocating block group %llu flags %s",
3965		   block_group->start, buf);
3966}
3967
3968static const char *stage_to_string(int stage)
3969{
3970	if (stage == MOVE_DATA_EXTENTS)
3971		return "move data extents";
3972	if (stage == UPDATE_DATA_PTRS)
3973		return "update data pointers";
3974	return "unknown";
3975}
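
/*
 * Data relocation is driven in two passes by the loop in
 * btrfs_relocate_block_group() below: a MOVE_DATA_EXTENTS pass that copies
 * the data into the relocation inode, then an UPDATE_DATA_PTRS pass that
 * rewrites the file extent pointers to the new locations.  The loop repeats
 * until a pass finds no more extents to process.
 */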
3976
3977/*
3978 * function to relocate all extents in a block group.
3979 */
3980int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3981{
3982	struct btrfs_block_group *bg;
3983	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3984	struct reloc_control *rc;
3985	struct inode *inode;
3986	struct btrfs_path *path;
3987	int ret;
3988	int rw = 0;
3989	int err = 0;
3990
3991	/*
3992	 * This only gets set if we had a half-deleted snapshot on mount.  We
3993	 * cannot allow relocation to start while we're still trying to clean up
3994	 * these pending deletions.
3995	 */
3996	ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3997	if (ret)
3998		return ret;
3999
4000	/* We may have been woken up by close_ctree, so bail if we're closing. */
4001	if (btrfs_fs_closing(fs_info))
4002		return -EINTR;
4003
4004	bg = btrfs_lookup_block_group(fs_info, group_start);
4005	if (!bg)
4006		return -ENOENT;
4007
4008	/*
4009	 * Relocation of a data block group creates ordered extents.  Without
4010	 * sb_start_write(), we can freeze the filesystem while unfinished
4011	 * ordered extents are left. Such ordered extents can cause a deadlock
4012	 * e.g. when syncfs() is waiting for their completion but they can't
4013	 * finish because they block when joining a transaction, due to the
4014	 * fact that the freeze locks are being held in write mode.
4015	 */
4016	if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4017		ASSERT(sb_write_started(fs_info->sb));
4018
4019	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4020		btrfs_put_block_group(bg);
4021		return -ETXTBSY;
4022	}
4023
4024	rc = alloc_reloc_control(fs_info);
4025	if (!rc) {
4026		btrfs_put_block_group(bg);
4027		return -ENOMEM;
4028	}
4029
4030	ret = reloc_chunk_start(fs_info);
4031	if (ret < 0) {
4032		err = ret;
4033		goto out_put_bg;
4034	}
4035
4036	rc->extent_root = extent_root;
4037	rc->block_group = bg;
4038
4039	ret = btrfs_inc_block_group_ro(rc->block_group, true);
4040	if (ret) {
4041		err = ret;
4042		goto out;
4043	}
4044	rw = 1;
4045
4046	path = btrfs_alloc_path();
4047	if (!path) {
4048		err = -ENOMEM;
4049		goto out;
4050	}
4051
4052	inode = lookup_free_space_inode(rc->block_group, path);
4053	btrfs_free_path(path);
4054
4055	if (!IS_ERR(inode))
4056		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4057	else
4058		ret = PTR_ERR(inode);
4059
4060	if (ret && ret != -ENOENT) {
4061		err = ret;
4062		goto out;
4063	}
4064
4065	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4066	if (IS_ERR(rc->data_inode)) {
4067		err = PTR_ERR(rc->data_inode);
4068		rc->data_inode = NULL;
4069		goto out;
4070	}
4071
4072	describe_relocation(fs_info, rc->block_group);
4073
4074	btrfs_wait_block_group_reservations(rc->block_group);
4075	btrfs_wait_nocow_writers(rc->block_group);
4076	btrfs_wait_ordered_roots(fs_info, U64_MAX,
4077				 rc->block_group->start,
4078				 rc->block_group->length);
4079
4080	ret = btrfs_zone_finish(rc->block_group);
4081	WARN_ON(ret && ret != -EAGAIN);
4082
4083	while (1) {
4084		int finishes_stage;
4085
4086		mutex_lock(&fs_info->cleaner_mutex);
4087		ret = relocate_block_group(rc);
4088		mutex_unlock(&fs_info->cleaner_mutex);
4089		if (ret < 0)
4090			err = ret;
4091
4092		finishes_stage = rc->stage;
4093		/*
4094		 * We may have gotten ENOSPC after we already dirtied some
4095		 * extents.  If writeout happens while we're relocating a
4096		 * different block group we could end up hitting the
4097		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4098		 * btrfs_reloc_cow_block.  Make sure we write everything out
4099		 * properly so we don't trip over this problem, and then break
4100		 * out of the loop if we hit an error.
4101		 */
4102		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4103			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4104						       (u64)-1);
4105			if (ret)
4106				err = ret;
4107			invalidate_mapping_pages(rc->data_inode->i_mapping,
4108						 0, -1);
4109			rc->stage = UPDATE_DATA_PTRS;
4110		}
4111
4112		if (err < 0)
4113			goto out;
4114
4115		if (rc->extents_found == 0)
4116			break;
4117
4118		btrfs_info(fs_info, "found %llu extents, stage: %s",
4119			   rc->extents_found, stage_to_string(finishes_stage));
4120	}
4121
4122	WARN_ON(rc->block_group->pinned > 0);
4123	WARN_ON(rc->block_group->reserved > 0);
4124	WARN_ON(rc->block_group->used > 0);
4125out:
4126	if (err && rw)
4127		btrfs_dec_block_group_ro(rc->block_group);
4128	iput(rc->data_inode);
4129out_put_bg:
4130	btrfs_put_block_group(bg);
4131	reloc_chunk_end(fs_info);
4132	free_reloc_control(rc);
4133	return err;
4134}
4135
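/*
 * Reset a reloc root whose fs root no longer exists: clear its drop
 * progress and set root_refs to 0 so that it is later deleted instead of
 * merged.
 */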
4136static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4137{
4138	struct btrfs_fs_info *fs_info = root->fs_info;
4139	struct btrfs_trans_handle *trans;
4140	int ret, err;
4141
4142	trans = btrfs_start_transaction(fs_info->tree_root, 0);
4143	if (IS_ERR(trans))
4144		return PTR_ERR(trans);
4145
4146	memset(&root->root_item.drop_progress, 0,
4147		sizeof(root->root_item.drop_progress));
4148	btrfs_set_root_drop_level(&root->root_item, 0);
4149	btrfs_set_root_refs(&root->root_item, 0);
4150	ret = btrfs_update_root(trans, fs_info->tree_root,
4151				&root->root_key, &root->root_item);
4152
4153	err = btrfs_end_transaction(trans);
4154	if (err)
4155		return err;
4156	return ret;
4157}
4158
4159/*
4160 * recover relocation interrupted by system crash.
4161 *
4162 * this function resumes merging reloc trees with corresponding fs trees.
4163 * this is important for keeping the sharing of tree blocks
4164 */
4165int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
4166{
4167	LIST_HEAD(reloc_roots);
4168	struct btrfs_key key;
4169	struct btrfs_root *fs_root;
4170	struct btrfs_root *reloc_root;
4171	struct btrfs_path *path;
4172	struct extent_buffer *leaf;
4173	struct reloc_control *rc = NULL;
4174	struct btrfs_trans_handle *trans;
4175	int ret;
4176	int err = 0;
4177
4178	path = btrfs_alloc_path();
4179	if (!path)
4180		return -ENOMEM;
4181	path->reada = READA_BACK;
4182
4183	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4184	key.type = BTRFS_ROOT_ITEM_KEY;
4185	key.offset = (u64)-1;
4186
4187	while (1) {
4188		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4189					path, 0, 0);
4190		if (ret < 0) {
4191			err = ret;
4192			goto out;
4193		}
4194		if (ret > 0) {
4195			if (path->slots[0] == 0)
4196				break;
4197			path->slots[0]--;
4198		}
4199		leaf = path->nodes[0];
4200		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4201		btrfs_release_path(path);
4202
4203		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4204		    key.type != BTRFS_ROOT_ITEM_KEY)
4205			break;
4206
4207		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
4208		if (IS_ERR(reloc_root)) {
4209			err = PTR_ERR(reloc_root);
4210			goto out;
4211		}
4212
4213		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4214		list_add(&reloc_root->root_list, &reloc_roots);
4215
4216		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4217			fs_root = btrfs_get_fs_root(fs_info,
4218					reloc_root->root_key.offset, false);
4219			if (IS_ERR(fs_root)) {
4220				ret = PTR_ERR(fs_root);
4221				if (ret != -ENOENT) {
4222					err = ret;
4223					goto out;
4224				}
4225				ret = mark_garbage_root(reloc_root);
4226				if (ret < 0) {
4227					err = ret;
4228					goto out;
4229				}
4230			} else {
4231				btrfs_put_root(fs_root);
4232			}
4233		}
4234
4235		if (key.offset == 0)
4236			break;
4237
4238		key.offset--;
4239	}
4240	btrfs_release_path(path);
4241
4242	if (list_empty(&reloc_roots))
4243		goto out;
4244
4245	rc = alloc_reloc_control(fs_info);
4246	if (!rc) {
4247		err = -ENOMEM;
4248		goto out;
4249	}
4250
4251	ret = reloc_chunk_start(fs_info);
4252	if (ret < 0) {
4253		err = ret;
4254		goto out_end;
4255	}
4256
4257	rc->extent_root = btrfs_extent_root(fs_info, 0);
4258
4259	set_reloc_control(rc);
4260
4261	trans = btrfs_join_transaction(rc->extent_root);
4262	if (IS_ERR(trans)) {
4263		err = PTR_ERR(trans);
4264		goto out_unset;
4265	}
4266
4267	rc->merge_reloc_tree = 1;
4268
4269	while (!list_empty(&reloc_roots)) {
4270		reloc_root = list_entry(reloc_roots.next,
4271					struct btrfs_root, root_list);
4272		list_del(&reloc_root->root_list);
4273
4274		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4275			list_add_tail(&reloc_root->root_list,
4276				      &rc->reloc_roots);
4277			continue;
4278		}
4279
4280		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4281					    false);
4282		if (IS_ERR(fs_root)) {
4283			err = PTR_ERR(fs_root);
4284			list_add_tail(&reloc_root->root_list, &reloc_roots);
4285			btrfs_end_transaction(trans);
4286			goto out_unset;
4287		}
4288
4289		err = __add_reloc_root(reloc_root);
4290		ASSERT(err != -EEXIST);
4291		if (err) {
4292			list_add_tail(&reloc_root->root_list, &reloc_roots);
4293			btrfs_put_root(fs_root);
4294			btrfs_end_transaction(trans);
4295			goto out_unset;
4296		}
4297		fs_root->reloc_root = btrfs_grab_root(reloc_root);
4298		btrfs_put_root(fs_root);
4299	}
4300
4301	err = btrfs_commit_transaction(trans);
4302	if (err)
4303		goto out_unset;
4304
4305	merge_reloc_roots(rc);
4306
4307	unset_reloc_control(rc);
4308
4309	trans = btrfs_join_transaction(rc->extent_root);
4310	if (IS_ERR(trans)) {
4311		err = PTR_ERR(trans);
4312		goto out_clean;
4313	}
4314	err = btrfs_commit_transaction(trans);
4315out_clean:
4316	ret = clean_dirty_subvols(rc);
4317	if (ret < 0 && !err)
4318		err = ret;
4319out_unset:
4320	unset_reloc_control(rc);
4321out_end:
4322	reloc_chunk_end(fs_info);
4323	free_reloc_control(rc);
4324out:
4325	free_reloc_roots(&reloc_roots);
4326
4327	btrfs_free_path(path);
4328
4329	if (err == 0) {
4330		/* cleanup orphan inode in data relocation tree */
4331		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4332		ASSERT(fs_root);
4333		err = btrfs_orphan_cleanup(fs_root);
4334		btrfs_put_root(fs_root);
4335	}
4336	return err;
4337}
4338
4339/*
4340 * helper to add ordered checksum for data relocation.
4341 *
4342 * cloning the checksums properly handles the nodatasum extents.
4343 * it also saves CPU time by not re-calculating the checksums.
4344 */
4345int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4346{
4347	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4348	struct btrfs_root *csum_root;
4349	struct btrfs_ordered_sum *sums;
4350	struct btrfs_ordered_extent *ordered;
4351	int ret;
4352	u64 disk_bytenr;
4353	u64 new_bytenr;
4354	LIST_HEAD(list);
4355
4356	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4357	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4358
4359	disk_bytenr = file_pos + inode->index_cnt;
4360	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4361	ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4362				      disk_bytenr + len - 1, &list, 0, false);
4363	if (ret)
4364		goto out;
4365
4366	while (!list_empty(&list)) {
4367		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4368		list_del_init(&sums->list);
4369
4370		/*
4371		 * We need to offset the new_bytenr based on where the csum is.
4372		 * We need to do this because we will read in entire prealloc
4373		 * extents but we may have written to say the middle of the
4374		 * prealloc extent, so we need to make sure the csum goes with
4375		 * the right disk offset.
4376		 *
4377		 * We can do this because the data reloc inode refers strictly
4378		 * to the on disk bytes, so we don't have to worry about
4379		 * disk_len vs real len like with real inodes since it's all
4380		 * disk length.
4381		 */
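		/*
		 * Concretely: if the cluster was read from the old location D
		 * (disk_bytenr above) and is being written at the new location
		 * N (ordered->disk_bytenr), a csum recorded for D + x must now
		 * cover N + x, which is exactly the computation below.
		 */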
4382		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4383		sums->bytenr = new_bytenr;
4384
4385		btrfs_add_ordered_sum(ordered, sums);
4386	}
4387out:
4388	btrfs_put_ordered_extent(ordered);
4389	return ret;
4390}
4391
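/*
 * Hook invoked when a tree block is COWed while relocation is active.
 *
 * For blocks of a reloc tree being built it points the cached backref node
 * at the new copy; for leaves COWed during the UPDATE_DATA_PTRS stage it
 * rewrites their file extent items via replace_file_extents().
 */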
4392int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4393			  struct btrfs_root *root, struct extent_buffer *buf,
4394			  struct extent_buffer *cow)
4395{
4396	struct btrfs_fs_info *fs_info = root->fs_info;
4397	struct reloc_control *rc;
4398	struct btrfs_backref_node *node;
4399	int first_cow = 0;
4400	int level;
4401	int ret = 0;
4402
4403	rc = fs_info->reloc_ctl;
4404	if (!rc)
4405		return 0;
4406
4407	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
4408
4409	level = btrfs_header_level(buf);
4410	if (btrfs_header_generation(buf) <=
4411	    btrfs_root_last_snapshot(&root->root_item))
4412		first_cow = 1;
4413
4414	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4415	    rc->create_reloc_tree) {
4416		WARN_ON(!first_cow && level == 0);
4417
4418		node = rc->backref_cache.path[level];
4419		BUG_ON(node->bytenr != buf->start &&
4420		       node->new_bytenr != buf->start);
4421
4422		btrfs_backref_drop_node_buffer(node);
4423		atomic_inc(&cow->refs);
4424		node->eb = cow;
4425		node->new_bytenr = cow->start;
4426
4427		if (!node->pending) {
4428			list_move_tail(&node->list,
4429				       &rc->backref_cache.pending[level]);
4430			node->pending = 1;
4431		}
4432
4433		if (first_cow)
4434			mark_block_processed(rc, node);
4435
4436		if (first_cow && level > 0)
4437			rc->nodes_relocated += buf->len;
4438	}
4439
4440	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4441		ret = replace_file_extents(trans, rc, root, cow);
4442	return ret;
4443}
4444
4445/*
4446 * called before creating a snapshot. it calculates the metadata reservation
4447 * required for relocating tree blocks in the snapshot
4448 */
4449void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4450			      u64 *bytes_to_reserve)
4451{
4452	struct btrfs_root *root = pending->root;
4453	struct reloc_control *rc = root->fs_info->reloc_ctl;
4454
4455	if (!rc || !have_reloc_root(root))
4456		return;
4457
4458	if (!rc->merge_reloc_tree)
4459		return;
4460
4461	root = root->reloc_root;
4462	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4463	/*
4464	 * relocation is in the stage of merging trees. the space
4465	 * used by merging a reloc tree is twice the size of
4466	 * relocated tree nodes in the worst case. half for cowing
4467	 * the reloc tree, half for cowing the fs tree. the space
4468	 * used by cowing the reloc tree will be freed after the
4469	 * tree is dropped. if we create snapshot, cowing the fs
4470	 * tree may use more space than it frees. so we need to
4471	 * reserve extra space.
4472	 */
4473	*bytes_to_reserve += rc->nodes_relocated;
4474}
4475
4476/*
4477 * called after snapshot is created. migrate block reservation
4478 * and create reloc root for the newly created snapshot
4479 *
4480 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4481 * references held on the reloc_root, one for root->reloc_root and one for
4482 * rc->reloc_roots.
4483 */
4484int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4485			       struct btrfs_pending_snapshot *pending)
4486{
4487	struct btrfs_root *root = pending->root;
4488	struct btrfs_root *reloc_root;
4489	struct btrfs_root *new_root;
4490	struct reloc_control *rc = root->fs_info->reloc_ctl;
4491	int ret;
4492
4493	if (!rc || !have_reloc_root(root))
4494		return 0;
4495
4496	rc = root->fs_info->reloc_ctl;
4497	rc->merging_rsv_size += rc->nodes_relocated;
4498
4499	if (rc->merge_reloc_tree) {
4500		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4501					      rc->block_rsv,
4502					      rc->nodes_relocated, true);
4503		if (ret)
4504			return ret;
4505	}
4506
4507	new_root = pending->snap;
4508	reloc_root = create_reloc_root(trans, root->reloc_root,
4509				       new_root->root_key.objectid);
4510	if (IS_ERR(reloc_root))
4511		return PTR_ERR(reloc_root);
4512
4513	ret = __add_reloc_root(reloc_root);
4514	ASSERT(ret != -EEXIST);
4515	if (ret) {
4516		/* Pairs with create_reloc_root */
4517		btrfs_put_root(reloc_root);
4518		return ret;
4519	}
4520	new_root->reloc_root = btrfs_grab_root(reloc_root);
4521
4522	if (rc->create_reloc_tree)
4523		ret = clone_backref_node(trans, rc, root, reloc_root);
4524	return ret;
4525}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/pagemap.h>
   8#include <linux/writeback.h>
   9#include <linux/blkdev.h>
  10#include <linux/rbtree.h>
  11#include <linux/slab.h>
  12#include <linux/error-injection.h>
  13#include "ctree.h"
  14#include "disk-io.h"
  15#include "transaction.h"
  16#include "volumes.h"
  17#include "locking.h"
  18#include "btrfs_inode.h"
  19#include "async-thread.h"
  20#include "free-space-cache.h"
  21#include "qgroup.h"
  22#include "print-tree.h"
  23#include "delalloc-space.h"
  24#include "block-group.h"
  25#include "backref.h"
  26#include "misc.h"
 
 
 
 
 
 
 
 
 
 
 
 
  27
  28/*
  29 * Relocation overview
  30 *
  31 * [What does relocation do]
  32 *
  33 * The objective of relocation is to relocate all extents of the target block
  34 * group to other block groups.
  35 * This is utilized by resize (shrink only), profile converting, compacting
  36 * space, or balance routine to spread chunks over devices.
  37 *
  38 * 		Before		|		After
  39 * ------------------------------------------------------------------
  40 *  BG A: 10 data extents	| BG A: deleted
  41 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
  42 *  BG C:  1 extents		| BG C:  3 data extents (1 old + 2 relocated)
  43 *
  44 * [How does relocation work]
  45 *
  46 * 1.   Mark the target block group read-only
  47 *      New extents won't be allocated from the target block group.
  48 *
  49 * 2.1  Record each extent in the target block group
  50 *      To build a proper map of extents to be relocated.
  51 *
  52 * 2.2  Build data reloc tree and reloc trees
  53 *      Data reloc tree will contain an inode, recording all newly relocated
  54 *      data extents.
  55 *      There will be only one data reloc tree for one data block group.
  56 *
  57 *      Reloc tree will be a special snapshot of its source tree, containing
  58 *      relocated tree blocks.
  59 *      Each tree referring to a tree block in target block group will get its
  60 *      reloc tree built.
  61 *
  62 * 2.3  Swap source tree with its corresponding reloc tree
  63 *      Each involved tree only refers to new extents after swap.
  64 *
  65 * 3.   Cleanup reloc trees and data reloc tree.
  66 *      As old extents in the target block group are still referenced by reloc
  67 *      trees, we need to clean them up before really freeing the target block
  68 *      group.
  69 *
  70 * The main complexity is in steps 2.2 and 2.3.
  71 *
  72 * The entry point of relocation is relocate_block_group() function.
  73 */
  74
  75#define RELOCATION_RESERVED_NODES	256
  76/*
  77 * map address of tree root to tree
  78 */
  79struct mapping_node {
  80	struct {
  81		struct rb_node rb_node;
  82		u64 bytenr;
  83	}; /* Use rb_simle_node for search/insert */
  84	void *data;
  85};
  86
  87struct mapping_tree {
  88	struct rb_root rb_root;
  89	spinlock_t lock;
  90};
  91
  92/*
  93 * present a tree block to process
  94 */
  95struct tree_block {
  96	struct {
  97		struct rb_node rb_node;
  98		u64 bytenr;
  99	}; /* Use rb_simple_node for search/insert */
 100	u64 owner;
 101	struct btrfs_key key;
 102	unsigned int level:8;
 103	unsigned int key_ready:1;
 104};
 105
 106#define MAX_EXTENTS 128
 107
 108struct file_extent_cluster {
 109	u64 start;
 110	u64 end;
 111	u64 boundary[MAX_EXTENTS];
 112	unsigned int nr;
 113};
 114
 115struct reloc_control {
 116	/* block group to relocate */
 117	struct btrfs_block_group *block_group;
 118	/* extent tree */
 119	struct btrfs_root *extent_root;
 120	/* inode for moving data */
 121	struct inode *data_inode;
 122
 123	struct btrfs_block_rsv *block_rsv;
 124
 125	struct btrfs_backref_cache backref_cache;
 126
 127	struct file_extent_cluster cluster;
 128	/* tree blocks have been processed */
 129	struct extent_io_tree processed_blocks;
 130	/* map start of tree root to corresponding reloc tree */
 131	struct mapping_tree reloc_root_tree;
 132	/* list of reloc trees */
 133	struct list_head reloc_roots;
 134	/* list of subvolume trees that get relocated */
 135	struct list_head dirty_subvol_roots;
 136	/* size of metadata reservation for merging reloc trees */
 137	u64 merging_rsv_size;
 138	/* size of relocated tree nodes */
 139	u64 nodes_relocated;
 140	/* reserved size for block group relocation*/
 141	u64 reserved_bytes;
 142
 143	u64 search_start;
 144	u64 extents_found;
 145
 146	unsigned int stage:8;
 147	unsigned int create_reloc_tree:1;
 148	unsigned int merge_reloc_tree:1;
 149	unsigned int found_file_extent:1;
 150};
 151
 152/* stages of data relocation */
 153#define MOVE_DATA_EXTENTS	0
 154#define UPDATE_DATA_PTRS	1
 155
 156static void mark_block_processed(struct reloc_control *rc,
 157				 struct btrfs_backref_node *node)
 158{
 159	u32 blocksize;
 160
 161	if (node->level == 0 ||
 162	    in_range(node->bytenr, rc->block_group->start,
 163		     rc->block_group->length)) {
 164		blocksize = rc->extent_root->fs_info->nodesize;
 165		set_extent_bits(&rc->processed_blocks, node->bytenr,
 166				node->bytenr + blocksize - 1, EXTENT_DIRTY);
 167	}
 168	node->processed = 1;
 169}
 170
 171
 172static void mapping_tree_init(struct mapping_tree *tree)
 173{
 174	tree->rb_root = RB_ROOT;
 175	spin_lock_init(&tree->lock);
 176}
 177
 178/*
 179 * walk up backref nodes until reach node presents tree root
 180 */
 181static struct btrfs_backref_node *walk_up_backref(
 182		struct btrfs_backref_node *node,
 183		struct btrfs_backref_edge *edges[], int *index)
 184{
 185	struct btrfs_backref_edge *edge;
 186	int idx = *index;
 187
 188	while (!list_empty(&node->upper)) {
 189		edge = list_entry(node->upper.next,
 190				  struct btrfs_backref_edge, list[LOWER]);
 191		edges[idx++] = edge;
 192		node = edge->node[UPPER];
 193	}
 194	BUG_ON(node->detached);
 195	*index = idx;
 196	return node;
 197}
 198
 199/*
 200 * walk down backref nodes to find start of next reference path
 201 */
 202static struct btrfs_backref_node *walk_down_backref(
 203		struct btrfs_backref_edge *edges[], int *index)
 204{
 205	struct btrfs_backref_edge *edge;
 206	struct btrfs_backref_node *lower;
 207	int idx = *index;
 208
 209	while (idx > 0) {
 210		edge = edges[idx - 1];
 211		lower = edge->node[LOWER];
 212		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
 213			idx--;
 214			continue;
 215		}
 216		edge = list_entry(edge->list[LOWER].next,
 217				  struct btrfs_backref_edge, list[LOWER]);
 218		edges[idx - 1] = edge;
 219		*index = idx;
 220		return edge->node[UPPER];
 221	}
 222	*index = 0;
 223	return NULL;
 224}
 225
 226static void update_backref_node(struct btrfs_backref_cache *cache,
 227				struct btrfs_backref_node *node, u64 bytenr)
 228{
 229	struct rb_node *rb_node;
 230	rb_erase(&node->rb_node, &cache->rb_root);
 231	node->bytenr = bytenr;
 232	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 233	if (rb_node)
 234		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
 235}
 236
 237/*
 238 * update backref cache after a transaction commit
 239 */
 240static int update_backref_cache(struct btrfs_trans_handle *trans,
 241				struct btrfs_backref_cache *cache)
 242{
 243	struct btrfs_backref_node *node;
 244	int level = 0;
 245
 246	if (cache->last_trans == 0) {
 247		cache->last_trans = trans->transid;
 248		return 0;
 249	}
 250
 251	if (cache->last_trans == trans->transid)
 252		return 0;
 253
 254	/*
 255	 * detached nodes are used to avoid unnecessary backref
 256	 * lookup. transaction commit changes the extent tree.
 257	 * so the detached nodes are no longer useful.
 258	 */
 259	while (!list_empty(&cache->detached)) {
 260		node = list_entry(cache->detached.next,
 261				  struct btrfs_backref_node, list);
 262		btrfs_backref_cleanup_node(cache, node);
 263	}
 264
 265	while (!list_empty(&cache->changed)) {
 266		node = list_entry(cache->changed.next,
 267				  struct btrfs_backref_node, list);
 268		list_del_init(&node->list);
 269		BUG_ON(node->pending);
 270		update_backref_node(cache, node, node->new_bytenr);
 271	}
 272
 273	/*
 274	 * some nodes can be left in the pending list if there were
 275	 * errors during processing the pending nodes.
 276	 */
 277	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
 278		list_for_each_entry(node, &cache->pending[level], list) {
 279			BUG_ON(!node->pending);
 280			if (node->bytenr == node->new_bytenr)
 281				continue;
 282			update_backref_node(cache, node, node->new_bytenr);
 283		}
 284	}
 285
 286	cache->last_trans = 0;
 287	return 1;
 288}
 289
 290static bool reloc_root_is_dead(struct btrfs_root *root)
 291{
 292	/*
 293	 * Pair with set_bit/clear_bit in clean_dirty_subvols and
 294	 * btrfs_update_reloc_root. We need to see the updated bit before
 295	 * trying to access reloc_root
 296	 */
 297	smp_rmb();
 298	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
 299		return true;
 300	return false;
 301}
 302
 303/*
 304 * Check if this subvolume tree has valid reloc tree.
 305 *
 306 * Reloc tree after swap is considered dead, thus not considered as valid.
 307 * This is enough for most callers, as they don't distinguish dead reloc root
 308 * from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
 309 * special case.
 310 */
 311static bool have_reloc_root(struct btrfs_root *root)
 312{
 313	if (reloc_root_is_dead(root))
 314		return false;
 315	if (!root->reloc_root)
 316		return false;
 317	return true;
 318}
 319
 320int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
 321{
 322	struct btrfs_root *reloc_root;
 323
 324	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 325		return 0;
 326
 327	/* This root has been merged with its reloc tree, we can ignore it */
 328	if (reloc_root_is_dead(root))
 329		return 1;
 330
 331	reloc_root = root->reloc_root;
 332	if (!reloc_root)
 333		return 0;
 334
 335	if (btrfs_header_generation(reloc_root->commit_root) ==
 336	    root->fs_info->running_transaction->transid)
 337		return 0;
 338	/*
 339	 * if there is reloc tree and it was created in previous
 340	 * transaction backref lookup can find the reloc tree,
 341	 * so backref node for the fs tree root is useless for
 342	 * relocation.
 343	 */
 344	return 1;
 345}
 346
 347/*
 348 * find reloc tree by address of tree root
 349 */
 350struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
 351{
 352	struct reloc_control *rc = fs_info->reloc_ctl;
 353	struct rb_node *rb_node;
 354	struct mapping_node *node;
 355	struct btrfs_root *root = NULL;
 356
 357	ASSERT(rc);
 358	spin_lock(&rc->reloc_root_tree.lock);
 359	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
 360	if (rb_node) {
 361		node = rb_entry(rb_node, struct mapping_node, rb_node);
 362		root = (struct btrfs_root *)node->data;
 363	}
 364	spin_unlock(&rc->reloc_root_tree.lock);
 365	return btrfs_grab_root(root);
 366}
 367
 368/*
 369 * For useless nodes, do two major clean ups:
 370 *
 371 * - Cleanup the children edges and nodes
 372 *   If child node is also orphan (no parent) during cleanup, then the child
 373 *   node will also be cleaned up.
 374 *
 375 * - Freeing up leaves (level 0), keeps nodes detached
 376 *   For nodes, the node is still cached as "detached"
 377 *
 378 * Return false if @node is not in the @useless_nodes list.
 379 * Return true if @node is in the @useless_nodes list.
 380 */
 381static bool handle_useless_nodes(struct reloc_control *rc,
 382				 struct btrfs_backref_node *node)
 383{
 384	struct btrfs_backref_cache *cache = &rc->backref_cache;
 385	struct list_head *useless_node = &cache->useless_node;
 386	bool ret = false;
 387
 388	while (!list_empty(useless_node)) {
 389		struct btrfs_backref_node *cur;
 390
 391		cur = list_first_entry(useless_node, struct btrfs_backref_node,
 392				 list);
 393		list_del_init(&cur->list);
 394
 395		/* Only tree root nodes can be added to @useless_nodes */
 396		ASSERT(list_empty(&cur->upper));
 397
 398		if (cur == node)
 399			ret = true;
 400
 401		/* The node is the lowest node */
 402		if (cur->lowest) {
 403			list_del_init(&cur->lower);
 404			cur->lowest = 0;
 405		}
 406
 407		/* Cleanup the lower edges */
 408		while (!list_empty(&cur->lower)) {
 409			struct btrfs_backref_edge *edge;
 410			struct btrfs_backref_node *lower;
 411
 412			edge = list_entry(cur->lower.next,
 413					struct btrfs_backref_edge, list[UPPER]);
 414			list_del(&edge->list[UPPER]);
 415			list_del(&edge->list[LOWER]);
 416			lower = edge->node[LOWER];
 417			btrfs_backref_free_edge(cache, edge);
 418
 419			/* Child node is also orphan, queue for cleanup */
 420			if (list_empty(&lower->upper))
 421				list_add(&lower->list, useless_node);
 422		}
 423		/* Mark this block processed for relocation */
 424		mark_block_processed(rc, cur);
 425
 426		/*
 427		 * Backref nodes for tree leaves are deleted from the cache.
 428		 * Backref nodes for upper level tree blocks are left in the
 429		 * cache to avoid unnecessary backref lookup.
 430		 */
 431		if (cur->level > 0) {
 432			list_add(&cur->list, &cache->detached);
 433			cur->detached = 1;
 434		} else {
 435			rb_erase(&cur->rb_node, &cache->rb_root);
 436			btrfs_backref_free_node(cache, cur);
 437		}
 438	}
 439	return ret;
 440}
 441
 442/*
 443 * Build backref tree for a given tree block. Root of the backref tree
 444 * corresponds the tree block, leaves of the backref tree correspond roots of
 445 * b-trees that reference the tree block.
 446 *
 447 * The basic idea of this function is check backrefs of a given block to find
 448 * upper level blocks that reference the block, and then check backrefs of
 449 * these upper level blocks recursively. The recursion stops when tree root is
 450 * reached or backrefs for the block is cached.
 451 *
 452 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
 453 * all upper level blocks that directly/indirectly reference the block are also
 454 * cached.
 455 */
 456static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
 457			struct reloc_control *rc, struct btrfs_key *node_key,
 458			int level, u64 bytenr)
 459{
 460	struct btrfs_backref_iter *iter;
 461	struct btrfs_backref_cache *cache = &rc->backref_cache;
 462	/* For searching parent of TREE_BLOCK_REF */
 463	struct btrfs_path *path;
 464	struct btrfs_backref_node *cur;
 465	struct btrfs_backref_node *node = NULL;
 466	struct btrfs_backref_edge *edge;
 467	int ret;
 468	int err = 0;
 469
 470	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
 471	if (!iter)
 472		return ERR_PTR(-ENOMEM);
 473	path = btrfs_alloc_path();
 474	if (!path) {
 475		err = -ENOMEM;
 476		goto out;
 477	}
 478
 479	node = btrfs_backref_alloc_node(cache, bytenr, level);
 480	if (!node) {
 481		err = -ENOMEM;
 482		goto out;
 483	}
 484
 485	node->lowest = 1;
 486	cur = node;
 487
 488	/* Breadth-first search to build backref cache */
 489	do {
 490		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
 491						  cur);
 492		if (ret < 0) {
 493			err = ret;
 494			goto out;
 495		}
 496		edge = list_first_entry_or_null(&cache->pending_edge,
 497				struct btrfs_backref_edge, list[UPPER]);
 498		/*
 499		 * The pending list isn't empty, take the first block to
 500		 * process
 501		 */
 502		if (edge) {
 503			list_del_init(&edge->list[UPPER]);
 504			cur = edge->node[UPPER];
 505		}
 506	} while (edge);
 507
 508	/* Finish the upper linkage of newly added edges/nodes */
 509	ret = btrfs_backref_finish_upper_links(cache, node);
 510	if (ret < 0) {
 511		err = ret;
 512		goto out;
 513	}
 514
 515	if (handle_useless_nodes(rc, node))
 516		node = NULL;
 517out:
 518	btrfs_backref_iter_free(iter);
 519	btrfs_free_path(path);
 520	if (err) {
 521		btrfs_backref_error_cleanup(cache, node);
 522		return ERR_PTR(err);
 523	}
 524	ASSERT(!node || !node->detached);
 525	ASSERT(list_empty(&cache->useless_node) &&
 526	       list_empty(&cache->pending_edge));
 527	return node;
 528}
 529
 530/*
 531 * helper to add backref node for the newly created snapshot.
 532 * the backref node is created by cloning backref node that
 533 * corresponds to root of source tree
 534 */
 535static int clone_backref_node(struct btrfs_trans_handle *trans,
 536			      struct reloc_control *rc,
 537			      struct btrfs_root *src,
 538			      struct btrfs_root *dest)
 539{
 540	struct btrfs_root *reloc_root = src->reloc_root;
 541	struct btrfs_backref_cache *cache = &rc->backref_cache;
 542	struct btrfs_backref_node *node = NULL;
 543	struct btrfs_backref_node *new_node;
 544	struct btrfs_backref_edge *edge;
 545	struct btrfs_backref_edge *new_edge;
 546	struct rb_node *rb_node;
 547
 548	if (cache->last_trans > 0)
 549		update_backref_cache(trans, cache);
 550
 551	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
 552	if (rb_node) {
 553		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
 554		if (node->detached)
 555			node = NULL;
 556		else
 557			BUG_ON(node->new_bytenr != reloc_root->node->start);
 558	}
 559
 560	if (!node) {
 561		rb_node = rb_simple_search(&cache->rb_root,
 562					   reloc_root->commit_root->start);
 563		if (rb_node) {
 564			node = rb_entry(rb_node, struct btrfs_backref_node,
 565					rb_node);
 566			BUG_ON(node->detached);
 567		}
 568	}
 569
 570	if (!node)
 571		return 0;
 572
 573	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
 574					    node->level);
 575	if (!new_node)
 576		return -ENOMEM;
 577
 578	new_node->lowest = node->lowest;
 579	new_node->checked = 1;
 580	new_node->root = btrfs_grab_root(dest);
 581	ASSERT(new_node->root);
 582
 583	if (!node->lowest) {
 584		list_for_each_entry(edge, &node->lower, list[UPPER]) {
 585			new_edge = btrfs_backref_alloc_edge(cache);
 586			if (!new_edge)
 587				goto fail;
 588
 589			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
 590						new_node, LINK_UPPER);
 591		}
 592	} else {
 593		list_add_tail(&new_node->lower, &cache->leaves);
 594	}
 595
 596	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
 597				   &new_node->rb_node);
 598	if (rb_node)
 599		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
 600
 601	if (!new_node->lowest) {
 602		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
 603			list_add_tail(&new_edge->list[LOWER],
 604				      &new_edge->node[LOWER]->upper);
 605		}
 606	}
 607	return 0;
 608fail:
 609	while (!list_empty(&new_node->lower)) {
 610		new_edge = list_entry(new_node->lower.next,
 611				      struct btrfs_backref_edge, list[UPPER]);
 612		list_del(&new_edge->list[UPPER]);
 613		btrfs_backref_free_edge(cache, new_edge);
 614	}
 615	btrfs_backref_free_node(cache, new_node);
 616	return -ENOMEM;
 617}
 618
 619/*
 620 * helper to add 'address of tree root -> reloc tree' mapping
 621 */
 622static int __must_check __add_reloc_root(struct btrfs_root *root)
 623{
 624	struct btrfs_fs_info *fs_info = root->fs_info;
 625	struct rb_node *rb_node;
 626	struct mapping_node *node;
 627	struct reloc_control *rc = fs_info->reloc_ctl;
 628
 629	node = kmalloc(sizeof(*node), GFP_NOFS);
 630	if (!node)
 631		return -ENOMEM;
 632
 633	node->bytenr = root->commit_root->start;
 634	node->data = root;
 635
 636	spin_lock(&rc->reloc_root_tree.lock);
 637	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
 638				   node->bytenr, &node->rb_node);
 639	spin_unlock(&rc->reloc_root_tree.lock);
 640	if (rb_node) {
 641		btrfs_err(fs_info,
 642			    "Duplicate root found for start=%llu while inserting into relocation tree",
 643			    node->bytenr);
 644		return -EEXIST;
 645	}
 646
 647	list_add_tail(&root->root_list, &rc->reloc_roots);
 648	return 0;
 649}
 650
 651/*
 652 * helper to delete the 'address of tree root -> reloc tree'
 653 * mapping
 654 */
 655static void __del_reloc_root(struct btrfs_root *root)
 656{
 657	struct btrfs_fs_info *fs_info = root->fs_info;
 658	struct rb_node *rb_node;
 659	struct mapping_node *node = NULL;
 660	struct reloc_control *rc = fs_info->reloc_ctl;
 661	bool put_ref = false;
 662
 663	if (rc && root->node) {
 664		spin_lock(&rc->reloc_root_tree.lock);
 665		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
 666					   root->commit_root->start);
 667		if (rb_node) {
 668			node = rb_entry(rb_node, struct mapping_node, rb_node);
 669			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
 670			RB_CLEAR_NODE(&node->rb_node);
 671		}
 672		spin_unlock(&rc->reloc_root_tree.lock);
 673		ASSERT(!node || (struct btrfs_root *)node->data == root);
 674	}
 675
 676	/*
 677	 * We only put the reloc root here if it's on the list.  There's a lot
 678	 * of places where the pattern is to splice the rc->reloc_roots, process
 679	 * the reloc roots, and then add the reloc root back onto
 680	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
 681	 * list we don't want the reference being dropped, because the guy
 682	 * messing with the list is in charge of the reference.
 683	 */
 684	spin_lock(&fs_info->trans_lock);
 685	if (!list_empty(&root->root_list)) {
 686		put_ref = true;
 687		list_del_init(&root->root_list);
 688	}
 689	spin_unlock(&fs_info->trans_lock);
 690	if (put_ref)
 691		btrfs_put_root(root);
 692	kfree(node);
 693}
 694
 695/*
 696 * helper to update the 'address of tree root -> reloc tree'
 697 * mapping
 698 */
 699static int __update_reloc_root(struct btrfs_root *root)
 700{
 701	struct btrfs_fs_info *fs_info = root->fs_info;
 702	struct rb_node *rb_node;
 703	struct mapping_node *node = NULL;
 704	struct reloc_control *rc = fs_info->reloc_ctl;
 705
 706	spin_lock(&rc->reloc_root_tree.lock);
 707	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
 708				   root->commit_root->start);
 709	if (rb_node) {
 710		node = rb_entry(rb_node, struct mapping_node, rb_node);
 711		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
 712	}
 713	spin_unlock(&rc->reloc_root_tree.lock);
 714
 715	if (!node)
 716		return 0;
 717	BUG_ON((struct btrfs_root *)node->data != root);
 718
 719	spin_lock(&rc->reloc_root_tree.lock);
 720	node->bytenr = root->node->start;
 721	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
 722				   node->bytenr, &node->rb_node);
 723	spin_unlock(&rc->reloc_root_tree.lock);
 724	if (rb_node)
 725		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
 726	return 0;
 727}
 728
 729static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 730					struct btrfs_root *root, u64 objectid)
 731{
 732	struct btrfs_fs_info *fs_info = root->fs_info;
 733	struct btrfs_root *reloc_root;
 734	struct extent_buffer *eb;
 735	struct btrfs_root_item *root_item;
 736	struct btrfs_key root_key;
 737	int ret = 0;
 738	bool must_abort = false;
 739
 740	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
 741	if (!root_item)
 742		return ERR_PTR(-ENOMEM);
 743
 744	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
 745	root_key.type = BTRFS_ROOT_ITEM_KEY;
 746	root_key.offset = objectid;
 747
 748	if (root->root_key.objectid == objectid) {
 749		u64 commit_root_gen;
 750
 751		/* called by btrfs_init_reloc_root */
 752		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
 753				      BTRFS_TREE_RELOC_OBJECTID);
 754		if (ret)
 755			goto fail;
 756
 757		/*
 758		 * Set the last_snapshot field to the generation of the commit
 759		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
 760		 * correctly (returns true) when the relocation root is created
 761		 * either inside the critical section of a transaction commit
 762		 * (through transaction.c:qgroup_account_snapshot()) and when
 763		 * it's created before the transaction commit is started.
 764		 */
 765		commit_root_gen = btrfs_header_generation(root->commit_root);
 766		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
 767	} else {
 768		/*
 769		 * called by btrfs_reloc_post_snapshot_hook.
 770		 * the source tree is a reloc tree, all tree blocks
 771		 * modified after it was created have RELOC flag
 772		 * set in their headers. so it's OK to not update
 773		 * the 'last_snapshot'.
 774		 */
 775		ret = btrfs_copy_root(trans, root, root->node, &eb,
 776				      BTRFS_TREE_RELOC_OBJECTID);
 777		if (ret)
 778			goto fail;
 779	}
 780
 781	/*
 782	 * We have changed references at this point, we must abort the
 783	 * transaction if anything fails.
 784	 */
 785	must_abort = true;
 786
 787	memcpy(root_item, &root->root_item, sizeof(*root_item));
 788	btrfs_set_root_bytenr(root_item, eb->start);
 789	btrfs_set_root_level(root_item, btrfs_header_level(eb));
 790	btrfs_set_root_generation(root_item, trans->transid);
 791
 792	if (root->root_key.objectid == objectid) {
 793		btrfs_set_root_refs(root_item, 0);
 794		memset(&root_item->drop_progress, 0,
 795		       sizeof(struct btrfs_disk_key));
 796		btrfs_set_root_drop_level(root_item, 0);
 797	}
 798
 799	btrfs_tree_unlock(eb);
 800	free_extent_buffer(eb);
 801
 802	ret = btrfs_insert_root(trans, fs_info->tree_root,
 803				&root_key, root_item);
 804	if (ret)
 805		goto fail;
 806
 807	kfree(root_item);
 808
 809	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
 810	if (IS_ERR(reloc_root)) {
 811		ret = PTR_ERR(reloc_root);
 812		goto abort;
 813	}
 814	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
 815	reloc_root->last_trans = trans->transid;
 816	return reloc_root;
 817fail:
 818	kfree(root_item);
 819abort:
 820	if (must_abort)
 821		btrfs_abort_transaction(trans, ret);
 822	return ERR_PTR(ret);
 823}
 824
 825/*
 826 * create reloc tree for a given fs tree. reloc tree is just a
 827 * snapshot of the fs tree with special root objectid.
 828 *
 829 * The reloc_root comes out of here with two references, one for
 830 * root->reloc_root, and another for being on the rc->reloc_roots list.
 831 */
 832int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 833			  struct btrfs_root *root)
 834{
 835	struct btrfs_fs_info *fs_info = root->fs_info;
 836	struct btrfs_root *reloc_root;
 837	struct reloc_control *rc = fs_info->reloc_ctl;
 838	struct btrfs_block_rsv *rsv;
 839	int clear_rsv = 0;
 840	int ret;
 841
 842	if (!rc)
 843		return 0;
 844
 845	/*
 846	 * The subvolume has reloc tree but the swap is finished, no need to
 847	 * create/update the dead reloc tree
 848	 */
 849	if (reloc_root_is_dead(root))
 850		return 0;
 851
 852	/*
 853	 * This is subtle but important.  We do not do
 854	 * record_root_in_transaction for reloc roots, instead we record their
 855	 * corresponding fs root, and then here we update the last trans for the
 856	 * reloc root.  This means that we have to do this for the entire life
 857	 * of the reloc root, regardless of which stage of the relocation we are
 858	 * in.
 859	 */
 860	if (root->reloc_root) {
 861		reloc_root = root->reloc_root;
 862		reloc_root->last_trans = trans->transid;
 863		return 0;
 864	}
 865
 866	/*
 867	 * We are merging reloc roots, we do not need new reloc trees.  Also
 868	 * reloc trees never need their own reloc tree.
 869	 */
 870	if (!rc->create_reloc_tree ||
 871	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
 872		return 0;
 873
 874	if (!trans->reloc_reserved) {
 875		rsv = trans->block_rsv;
 876		trans->block_rsv = rc->block_rsv;
 877		clear_rsv = 1;
 878	}
 879	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
 880	if (clear_rsv)
 881		trans->block_rsv = rsv;
 882	if (IS_ERR(reloc_root))
 883		return PTR_ERR(reloc_root);
 884
 885	ret = __add_reloc_root(reloc_root);
 886	ASSERT(ret != -EEXIST);
 887	if (ret) {
 888		/* Pairs with create_reloc_root */
 889		btrfs_put_root(reloc_root);
 890		return ret;
 891	}
 892	root->reloc_root = btrfs_grab_root(reloc_root);
 893	return 0;
 894}
 895
 896/*
 897 * update root item of reloc tree
 898 */
 899int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 900			    struct btrfs_root *root)
 901{
 902	struct btrfs_fs_info *fs_info = root->fs_info;
 903	struct btrfs_root *reloc_root;
 904	struct btrfs_root_item *root_item;
 905	int ret;
 906
 907	if (!have_reloc_root(root))
 908		return 0;
 909
 910	reloc_root = root->reloc_root;
 911	root_item = &reloc_root->root_item;
 912
 913	/*
 914	 * We are probably ok here, but __del_reloc_root() will drop its ref of
 915	 * the root.  We have the ref for root->reloc_root, but just in case
 916	 * hold it while we update the reloc root.
 917	 */
 918	btrfs_grab_root(reloc_root);
 919
 920	/* root->reloc_root will stay until current relocation finished */
 921	if (fs_info->reloc_ctl->merge_reloc_tree &&
 922	    btrfs_root_refs(root_item) == 0) {
 923		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
 924		/*
 925		 * Mark the tree as dead before we change reloc_root so
 926		 * have_reloc_root will not touch it from now on.
 927		 */
 928		smp_wmb();
 929		__del_reloc_root(reloc_root);
 930	}
 931
 932	if (reloc_root->commit_root != reloc_root->node) {
 933		__update_reloc_root(reloc_root);
 934		btrfs_set_root_node(root_item, reloc_root->node);
 935		free_extent_buffer(reloc_root->commit_root);
 936		reloc_root->commit_root = btrfs_root_node(reloc_root);
 937	}
 938
 939	ret = btrfs_update_root(trans, fs_info->tree_root,
 940				&reloc_root->root_key, root_item);
 941	btrfs_put_root(reloc_root);
 942	return ret;
 943}
 944
 945/*
 946 * helper to find first cached inode with inode number >= objectid
 947 * in a subvolume
 948 */
 949static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
 950{
 951	struct rb_node *node;
 952	struct rb_node *prev;
 953	struct btrfs_inode *entry;
 954	struct inode *inode;
 955
 956	spin_lock(&root->inode_lock);
 957again:
 958	node = root->inode_tree.rb_node;
 959	prev = NULL;
 960	while (node) {
 961		prev = node;
 962		entry = rb_entry(node, struct btrfs_inode, rb_node);
 963
 964		if (objectid < btrfs_ino(entry))
 965			node = node->rb_left;
 966		else if (objectid > btrfs_ino(entry))
 967			node = node->rb_right;
 968		else
 969			break;
 970	}
 971	if (!node) {
 972		while (prev) {
 973			entry = rb_entry(prev, struct btrfs_inode, rb_node);
 974			if (objectid <= btrfs_ino(entry)) {
 975				node = prev;
 976				break;
 977			}
 978			prev = rb_next(prev);
 979		}
 980	}
 981	while (node) {
 982		entry = rb_entry(node, struct btrfs_inode, rb_node);
 983		inode = igrab(&entry->vfs_inode);
 984		if (inode) {
 985			spin_unlock(&root->inode_lock);
 986			return inode;
 987		}
 988
 989		objectid = btrfs_ino(entry) + 1;
 990		if (cond_resched_lock(&root->inode_lock))
 991			goto again;
 992
 993		node = rb_next(node);
 994	}
 995	spin_unlock(&root->inode_lock);
 996	return NULL;
 997}
 998
 999/*
1000 * get new location of data
1001 */
1002static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1003			    u64 bytenr, u64 num_bytes)
1004{
1005	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1006	struct btrfs_path *path;
1007	struct btrfs_file_extent_item *fi;
1008	struct extent_buffer *leaf;
1009	int ret;
1010
1011	path = btrfs_alloc_path();
1012	if (!path)
1013		return -ENOMEM;
1014
1015	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1016	ret = btrfs_lookup_file_extent(NULL, root, path,
1017			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1018	if (ret < 0)
1019		goto out;
1020	if (ret > 0) {
1021		ret = -ENOENT;
1022		goto out;
1023	}
1024
1025	leaf = path->nodes[0];
1026	fi = btrfs_item_ptr(leaf, path->slots[0],
1027			    struct btrfs_file_extent_item);
1028
1029	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1030	       btrfs_file_extent_compression(leaf, fi) ||
1031	       btrfs_file_extent_encryption(leaf, fi) ||
1032	       btrfs_file_extent_other_encoding(leaf, fi));
1033
1034	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1035		ret = -EINVAL;
1036		goto out;
1037	}
1038
1039	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1040	ret = 0;
1041out:
1042	btrfs_free_path(path);
1043	return ret;
1044}
1045
1046/*
1047 * update file extent items in the tree leaf to point to
1048 * the new locations.
1049 */
1050static noinline_for_stack
1051int replace_file_extents(struct btrfs_trans_handle *trans,
1052			 struct reloc_control *rc,
1053			 struct btrfs_root *root,
1054			 struct extent_buffer *leaf)
1055{
1056	struct btrfs_fs_info *fs_info = root->fs_info;
1057	struct btrfs_key key;
1058	struct btrfs_file_extent_item *fi;
1059	struct inode *inode = NULL;
1060	u64 parent;
1061	u64 bytenr;
1062	u64 new_bytenr = 0;
1063	u64 num_bytes;
1064	u64 end;
1065	u32 nritems;
1066	u32 i;
1067	int ret = 0;
1068	int first = 1;
1069	int dirty = 0;
1070
1071	if (rc->stage != UPDATE_DATA_PTRS)
1072		return 0;
1073
1074	/* reloc trees always use full backref */
1075	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1076		parent = leaf->start;
1077	else
1078		parent = 0;
1079
1080	nritems = btrfs_header_nritems(leaf);
1081	for (i = 0; i < nritems; i++) {
1082		struct btrfs_ref ref = { 0 };
1083
1084		cond_resched();
1085		btrfs_item_key_to_cpu(leaf, &key, i);
1086		if (key.type != BTRFS_EXTENT_DATA_KEY)
1087			continue;
1088		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1089		if (btrfs_file_extent_type(leaf, fi) ==
1090		    BTRFS_FILE_EXTENT_INLINE)
1091			continue;
1092		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1093		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1094		if (bytenr == 0)
1095			continue;
1096		if (!in_range(bytenr, rc->block_group->start,
1097			      rc->block_group->length))
1098			continue;
1099
1100		/*
1101		 * if we are modifying block in fs tree, wait for readpage
1102		 * to complete and drop the extent cache
1103		 */
1104		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1105			if (first) {
1106				inode = find_next_inode(root, key.objectid);
1107				first = 0;
1108			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1109				btrfs_add_delayed_iput(inode);
1110				inode = find_next_inode(root, key.objectid);
1111			}
1112			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
 
 
1113				end = key.offset +
1114				      btrfs_file_extent_num_bytes(leaf, fi);
1115				WARN_ON(!IS_ALIGNED(key.offset,
1116						    fs_info->sectorsize));
1117				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1118				end--;
1119				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1120						      key.offset, end);
 
1121				if (!ret)
1122					continue;
1123
1124				btrfs_drop_extent_cache(BTRFS_I(inode),
1125						key.offset,	end, 1);
1126				unlock_extent(&BTRFS_I(inode)->io_tree,
1127					      key.offset, end);
1128			}
1129		}
1130
1131		ret = get_new_location(rc->data_inode, &new_bytenr,
1132				       bytenr, num_bytes);
1133		if (ret) {
1134			/*
1135			 * Don't have to abort since we've not changed anything
1136			 * in the file extent yet.
1137			 */
1138			break;
1139		}
1140
1141		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1142		dirty = 1;
1143
1144		key.offset -= btrfs_file_extent_offset(leaf, fi);
1145		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1146				       num_bytes, parent);
1147		ref.real_root = root->root_key.objectid;
1148		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1149				    key.objectid, key.offset);
 
1150		ret = btrfs_inc_extent_ref(trans, &ref);
1151		if (ret) {
1152			btrfs_abort_transaction(trans, ret);
1153			break;
1154		}
1155
1156		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1157				       num_bytes, parent);
1158		ref.real_root = root->root_key.objectid;
1159		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1160				    key.objectid, key.offset);
 
1161		ret = btrfs_free_extent(trans, &ref);
1162		if (ret) {
1163			btrfs_abort_transaction(trans, ret);
1164			break;
1165		}
1166	}
1167	if (dirty)
1168		btrfs_mark_buffer_dirty(leaf);
1169	if (inode)
1170		btrfs_add_delayed_iput(inode);
1171	return ret;
1172}
1173
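/*
 * Compare the key of slot @slot in @eb with the key at the current slot of
 * @path at @level.  Returns 0 if the two node keys match.
 */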
1174static noinline_for_stack
1175int memcmp_node_keys(struct extent_buffer *eb, int slot,
1176		     struct btrfs_path *path, int level)
1177{
1178	struct btrfs_disk_key key1;
1179	struct btrfs_disk_key key2;
1180	btrfs_node_key(eb, &key1, slot);
1181	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1182	return memcmp(&key1, &key2, sizeof(key1));
1183}
1184
1185/*
1186 * try to replace tree blocks in fs tree with the new blocks
1187 * in reloc tree. tree blocks that haven't been modified since the
1188 * reloc tree was created can be replaced.
1189 *
1190 * if a block was replaced, level of the block + 1 is returned.
1191 * if no block got replaced, 0 is returned. if there are other
1192 * errors, a negative error number is returned.
1193 */
1194static noinline_for_stack
1195int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1196		 struct btrfs_root *dest, struct btrfs_root *src,
1197		 struct btrfs_path *path, struct btrfs_key *next_key,
1198		 int lowest_level, int max_level)
1199{
1200	struct btrfs_fs_info *fs_info = dest->fs_info;
1201	struct extent_buffer *eb;
1202	struct extent_buffer *parent;
1203	struct btrfs_ref ref = { 0 };
1204	struct btrfs_key key;
1205	u64 old_bytenr;
1206	u64 new_bytenr;
1207	u64 old_ptr_gen;
1208	u64 new_ptr_gen;
1209	u64 last_snapshot;
1210	u32 blocksize;
1211	int cow = 0;
1212	int level;
1213	int ret;
1214	int slot;
1215
1216	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1217	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1218
1219	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1220again:
1221	slot = path->slots[lowest_level];
1222	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1223
1224	eb = btrfs_lock_root_node(dest);
1225	level = btrfs_header_level(eb);
1226
1227	if (level < lowest_level) {
1228		btrfs_tree_unlock(eb);
1229		free_extent_buffer(eb);
1230		return 0;
1231	}
1232
1233	if (cow) {
1234		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1235				      BTRFS_NESTING_COW);
1236		if (ret) {
1237			btrfs_tree_unlock(eb);
1238			free_extent_buffer(eb);
1239			return ret;
1240		}
1241	}
1242
1243	if (next_key) {
1244		next_key->objectid = (u64)-1;
1245		next_key->type = (u8)-1;
1246		next_key->offset = (u64)-1;
1247	}
1248
1249	parent = eb;
1250	while (1) {
1251		level = btrfs_header_level(parent);
1252		ASSERT(level >= lowest_level);
1253
1254		ret = btrfs_bin_search(parent, &key, &slot);
1255		if (ret < 0)
1256			break;
1257		if (ret && slot > 0)
1258			slot--;
1259
1260		if (next_key && slot + 1 < btrfs_header_nritems(parent))
1261			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1262
1263		old_bytenr = btrfs_node_blockptr(parent, slot);
1264		blocksize = fs_info->nodesize;
1265		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1266
1267		if (level <= max_level) {
1268			eb = path->nodes[level];
1269			new_bytenr = btrfs_node_blockptr(eb,
1270							path->slots[level]);
1271			new_ptr_gen = btrfs_node_ptr_generation(eb,
1272							path->slots[level]);
1273		} else {
1274			new_bytenr = 0;
1275			new_ptr_gen = 0;
1276		}
1277
1278		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1279			ret = level;
1280			break;
1281		}
1282
1283		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1284		    memcmp_node_keys(parent, slot, path, level)) {
1285			if (level <= lowest_level) {
1286				ret = 0;
1287				break;
1288			}
1289
1290			eb = btrfs_read_node_slot(parent, slot);
1291			if (IS_ERR(eb)) {
1292				ret = PTR_ERR(eb);
1293				break;
1294			}
1295			btrfs_tree_lock(eb);
1296			if (cow) {
1297				ret = btrfs_cow_block(trans, dest, eb, parent,
1298						      slot, &eb,
1299						      BTRFS_NESTING_COW);
1300				if (ret) {
1301					btrfs_tree_unlock(eb);
1302					free_extent_buffer(eb);
1303					break;
1304				}
1305			}
1306
1307			btrfs_tree_unlock(parent);
1308			free_extent_buffer(parent);
1309
1310			parent = eb;
1311			continue;
1312		}
1313
1314		if (!cow) {
1315			btrfs_tree_unlock(parent);
1316			free_extent_buffer(parent);
1317			cow = 1;
1318			goto again;
1319		}
1320
1321		btrfs_node_key_to_cpu(path->nodes[level], &key,
1322				      path->slots[level]);
1323		btrfs_release_path(path);
1324
1325		path->lowest_level = level;
1326		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1327		path->lowest_level = 0;
1328		if (ret) {
1329			if (ret > 0)
1330				ret = -ENOENT;
1331			break;
1332		}
1333
1334		/*
1335		 * Inform qgroup to trace both subtrees.
1336		 *
1337		 * We must trace both trees.
1338		 * 1) Tree reloc subtree
1339		 *    If not traced, we will leak data numbers
1340		 * 2) Fs subtree
1341		 *    If not traced, we will double count old data
1342		 *
1343		 * We don't scan the subtree right now, but only record
1344		 * the swapped tree blocks.
1345		 * The real subtree rescan is delayed until we have new
1346		 * CoW on the subtree root node before transaction commit.
1347		 */
1348		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1349				rc->block_group, parent, slot,
1350				path->nodes[level], path->slots[level],
1351				last_snapshot);
1352		if (ret < 0)
1353			break;
1354		/*
1355		 * swap blocks in fs tree and reloc tree.
1356		 */
1357		btrfs_set_node_blockptr(parent, slot, new_bytenr);
1358		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1359		btrfs_mark_buffer_dirty(parent);
1360
1361		btrfs_set_node_blockptr(path->nodes[level],
1362					path->slots[level], old_bytenr);
1363		btrfs_set_node_ptr_generation(path->nodes[level],
1364					      path->slots[level], old_ptr_gen);
1365		btrfs_mark_buffer_dirty(path->nodes[level]);
1366
1367		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1368				       blocksize, path->nodes[level]->start);
1369		ref.skip_qgroup = true;
1370		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
1371		ret = btrfs_inc_extent_ref(trans, &ref);
1372		if (ret) {
1373			btrfs_abort_transaction(trans, ret);
1374			break;
1375		}
1376		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1377				       blocksize, 0);
1378		ref.skip_qgroup = true;
1379		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
1380		ret = btrfs_inc_extent_ref(trans, &ref);
1381		if (ret) {
1382			btrfs_abort_transaction(trans, ret);
1383			break;
1384		}
1385
1386		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1387				       blocksize, path->nodes[level]->start);
1388		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
1389		ref.skip_qgroup = true;
1390		ret = btrfs_free_extent(trans, &ref);
1391		if (ret) {
1392			btrfs_abort_transaction(trans, ret);
1393			break;
1394		}
1395
1396		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1397				       blocksize, 0);
1398		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
1399		ref.skip_qgroup = true;
1400		ret = btrfs_free_extent(trans, &ref);
1401		if (ret) {
1402			btrfs_abort_transaction(trans, ret);
1403			break;
1404		}
1405
1406		btrfs_unlock_up_safe(path, 0);
1407
1408		ret = level;
1409		break;
1410	}
1411	btrfs_tree_unlock(parent);
1412	free_extent_buffer(parent);
1413	return ret;
1414}
1415
1416/*
1417 * helper to find next relocated block in reloc tree
1418 */
1419static noinline_for_stack
1420int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1421		       int *level)
1422{
1423	struct extent_buffer *eb;
1424	int i;
1425	u64 last_snapshot;
1426	u32 nritems;
1427
1428	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1429
1430	for (i = 0; i < *level; i++) {
1431		free_extent_buffer(path->nodes[i]);
1432		path->nodes[i] = NULL;
1433	}
1434
1435	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1436		eb = path->nodes[i];
1437		nritems = btrfs_header_nritems(eb);
1438		while (path->slots[i] + 1 < nritems) {
1439			path->slots[i]++;
1440			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1441			    last_snapshot)
1442				continue;
1443
1444			*level = i;
1445			return 0;
1446		}
1447		free_extent_buffer(path->nodes[i]);
1448		path->nodes[i] = NULL;
1449	}
1450	return 1;
1451}
1452
1453/*
1454 * walk down reloc tree to find relocated block of lowest level
1455 */
1456static noinline_for_stack
1457int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1458			 int *level)
1459{
1460	struct extent_buffer *eb = NULL;
1461	int i;
1462	u64 ptr_gen = 0;
1463	u64 last_snapshot;
1464	u32 nritems;
1465
1466	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1467
1468	for (i = *level; i > 0; i--) {
1469		eb = path->nodes[i];
1470		nritems = btrfs_header_nritems(eb);
1471		while (path->slots[i] < nritems) {
1472			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1473			if (ptr_gen > last_snapshot)
1474				break;
1475			path->slots[i]++;
1476		}
1477		if (path->slots[i] >= nritems) {
1478			if (i == *level)
1479				break;
1480			*level = i + 1;
1481			return 0;
1482		}
1483		if (i == 1) {
1484			*level = i;
1485			return 0;
1486		}
1487
1488		eb = btrfs_read_node_slot(eb, path->slots[i]);
1489		if (IS_ERR(eb))
1490			return PTR_ERR(eb);
1491		BUG_ON(btrfs_header_level(eb) != i - 1);
1492		path->nodes[i - 1] = eb;
1493		path->slots[i - 1] = 0;
1494	}
1495	return 1;
1496}
1497
1498/*
1499 * invalidate extent cache for file extents whose key is in the range of
1500 * [min_key, max_key)
1501 */
1502static int invalidate_extent_cache(struct btrfs_root *root,
1503				   struct btrfs_key *min_key,
1504				   struct btrfs_key *max_key)
1505{
1506	struct btrfs_fs_info *fs_info = root->fs_info;
1507	struct inode *inode = NULL;
1508	u64 objectid;
1509	u64 start, end;
1510	u64 ino;
1511
1512	objectid = min_key->objectid;
1513	while (1) {
1514		cond_resched();
1515		iput(inode);
1516
1517		if (objectid > max_key->objectid)
1518			break;
1519
1520		inode = find_next_inode(root, objectid);
1521		if (!inode)
1522			break;
1523		ino = btrfs_ino(BTRFS_I(inode));
1524
1525		if (ino > max_key->objectid) {
1526			iput(inode);
1527			break;
1528		}
1529
1530		objectid = ino + 1;
1531		if (!S_ISREG(inode->i_mode))
1532			continue;
1533
1534		if (unlikely(min_key->objectid == ino)) {
1535			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1536				continue;
1537			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1538				start = 0;
1539			else {
1540				start = min_key->offset;
1541				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1542			}
1543		} else {
1544			start = 0;
1545		}
1546
1547		if (unlikely(max_key->objectid == ino)) {
1548			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1549				continue;
1550			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1551				end = (u64)-1;
1552			} else {
1553				if (max_key->offset == 0)
1554					continue;
1555				end = max_key->offset;
1556				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1557				end--;
1558			}
1559		} else {
1560			end = (u64)-1;
1561		}
1562
1563		/* the lock_extent waits for readpage to complete */
1564		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
1565		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
1566		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1567	}
1568	return 0;
1569}
1570
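/*
 * Walk up @path from @level to find the key following the current slot.
 * Returns 0 and fills @key on success, 1 if there is no next key.
 */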
1571static int find_next_key(struct btrfs_path *path, int level,
1572			 struct btrfs_key *key)
1573
1574{
1575	while (level < BTRFS_MAX_LEVEL) {
1576		if (!path->nodes[level])
1577			break;
1578		if (path->slots[level] + 1 <
1579		    btrfs_header_nritems(path->nodes[level])) {
1580			btrfs_node_key_to_cpu(path->nodes[level], key,
1581					      path->slots[level] + 1);
1582			return 0;
1583		}
1584		level++;
1585	}
1586	return 1;
1587}
1588
1589/*
1590 * Insert current subvolume into reloc_control::dirty_subvol_roots
1591 */
1592static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
1593			       struct reloc_control *rc,
1594			       struct btrfs_root *root)
1595{
1596	struct btrfs_root *reloc_root = root->reloc_root;
1597	struct btrfs_root_item *reloc_root_item;
1598	int ret;
1599
1600	/* @root must be a subvolume tree root with a valid reloc tree */
1601	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1602	ASSERT(reloc_root);
1603
1604	reloc_root_item = &reloc_root->root_item;
1605	memset(&reloc_root_item->drop_progress, 0,
1606		sizeof(reloc_root_item->drop_progress));
1607	btrfs_set_root_drop_level(reloc_root_item, 0);
1608	btrfs_set_root_refs(reloc_root_item, 0);
1609	ret = btrfs_update_reloc_root(trans, root);
1610	if (ret)
1611		return ret;
1612
1613	if (list_empty(&root->reloc_dirty_list)) {
1614		btrfs_grab_root(root);
1615		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1616	}
1617
1618	return 0;
1619}
1620
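/*
 * Drop the reloc trees queued on reloc_control::dirty_subvol_roots and
 * detach them from their subvolume roots.  Orphan reloc trees on the list
 * are simply dropped.
 */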
1621static int clean_dirty_subvols(struct reloc_control *rc)
1622{
1623	struct btrfs_root *root;
1624	struct btrfs_root *next;
1625	int ret = 0;
1626	int ret2;
1627
1628	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1629				 reloc_dirty_list) {
1630		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1631			/* Merged subvolume, cleanup its reloc root */
1632			struct btrfs_root *reloc_root = root->reloc_root;
1633
1634			list_del_init(&root->reloc_dirty_list);
1635			root->reloc_root = NULL;
1636			/*
1637			 * Need barrier to ensure clear_bit() only happens after
1638			 * root->reloc_root = NULL. Pairs with have_reloc_root.
1639			 */
1640			smp_wmb();
1641			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1642			if (reloc_root) {
1643				/*
1644				 * btrfs_drop_snapshot drops our ref we hold for
1645				 * ->reloc_root.  If it fails however we must
1646				 * drop the ref ourselves.
1647				 */
1648				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1649				if (ret2 < 0) {
1650					btrfs_put_root(reloc_root);
1651					if (!ret)
1652						ret = ret2;
1653				}
1654			}
1655			btrfs_put_root(root);
1656		} else {
1657			/* Orphan reloc tree, just clean it up */
1658			ret2 = btrfs_drop_snapshot(root, 0, 1);
1659			if (ret2 < 0) {
1660				btrfs_put_root(root);
1661				if (!ret)
1662					ret = ret2;
1663			}
1664		}
1665	}
1666	return ret;
1667}
1668
1669/*
1670 * merge the relocated tree blocks in reloc tree with corresponding
1671 * fs tree.
1672 */
1673static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1674					       struct btrfs_root *root)
1675{
1676	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1677	struct btrfs_key key;
1678	struct btrfs_key next_key;
1679	struct btrfs_trans_handle *trans = NULL;
1680	struct btrfs_root *reloc_root;
1681	struct btrfs_root_item *root_item;
1682	struct btrfs_path *path;
1683	struct extent_buffer *leaf;
1684	int reserve_level;
1685	int level;
1686	int max_level;
1687	int replaced = 0;
1688	int ret = 0;
1689	u32 min_reserved;
1690
1691	path = btrfs_alloc_path();
1692	if (!path)
1693		return -ENOMEM;
1694	path->reada = READA_FORWARD;
1695
1696	reloc_root = root->reloc_root;
1697	root_item = &reloc_root->root_item;
1698
1699	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1700		level = btrfs_root_level(root_item);
1701		atomic_inc(&reloc_root->node->refs);
1702		path->nodes[level] = reloc_root->node;
1703		path->slots[level] = 0;
1704	} else {
1705		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1706
1707		level = btrfs_root_drop_level(root_item);
1708		BUG_ON(level == 0);
1709		path->lowest_level = level;
1710		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1711		path->lowest_level = 0;
1712		if (ret < 0) {
1713			btrfs_free_path(path);
1714			return ret;
1715		}
1716
1717		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1718				      path->slots[level]);
1719		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1720
1721		btrfs_unlock_up_safe(path, 0);
1722	}
1723
1724	/*
1725	 * In merge_reloc_root(), we modify the upper level pointer to swap the
1726	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
1727	 * block COW, we COW at most from level 1 to root level for each tree.
1728	 *
1729	 * Thus the needed metadata size is at most root_level * nodesize,
1730	 * and * 2 since we have two trees to COW.
1731	 */
1732	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1733	min_reserved = fs_info->nodesize * reserve_level * 2;
1734	memset(&next_key, 0, sizeof(next_key));
1735
1736	while (1) {
1737		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
1738					     BTRFS_RESERVE_FLUSH_LIMIT);
1739		if (ret)
1740			goto out;
1741		trans = btrfs_start_transaction(root, 0);
1742		if (IS_ERR(trans)) {
1743			ret = PTR_ERR(trans);
1744			trans = NULL;
1745			goto out;
1746		}
1747
1748		/*
1749		 * At this point we no longer have a reloc_control, so we can't
1750		 * depend on btrfs_init_reloc_root to update our last_trans.
1751		 *
1752		 * But that's ok, we started the trans handle on our
1753		 * corresponding fs_root, which means it's been added to the
1754		 * dirty list.  At commit time we'll still call
1755		 * btrfs_update_reloc_root() and update our root item
1756		 * appropriately.
1757		 */
1758		reloc_root->last_trans = trans->transid;
1759		trans->block_rsv = rc->block_rsv;
1760
1761		replaced = 0;
1762		max_level = level;
1763
1764		ret = walk_down_reloc_tree(reloc_root, path, &level);
1765		if (ret < 0)
1766			goto out;
1767		if (ret > 0)
1768			break;
1769
1770		if (!find_next_key(path, level, &key) &&
1771		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1772			ret = 0;
1773		} else {
1774			ret = replace_path(trans, rc, root, reloc_root, path,
1775					   &next_key, level, max_level);
1776		}
1777		if (ret < 0)
1778			goto out;
1779		if (ret > 0) {
1780			level = ret;
1781			btrfs_node_key_to_cpu(path->nodes[level], &key,
1782					      path->slots[level]);
1783			replaced = 1;
1784		}
1785
1786		ret = walk_up_reloc_tree(reloc_root, path, &level);
1787		if (ret > 0)
1788			break;
1789
1790		BUG_ON(level == 0);
1791		/*
1792		 * save the merging progress in the drop_progress.
1793		 * this is OK since root refs == 1 in this case.
1794		 */
1795		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1796			       path->slots[level]);
1797		btrfs_set_root_drop_level(root_item, level);
1798
1799		btrfs_end_transaction_throttle(trans);
1800		trans = NULL;
1801
1802		btrfs_btree_balance_dirty(fs_info);
1803
1804		if (replaced && rc->stage == UPDATE_DATA_PTRS)
1805			invalidate_extent_cache(root, &key, &next_key);
1806	}
1807
1808	/*
1809	 * handle the case where only one block in the fs tree needs to be
1810	 * relocated and the block is the tree root.
1811	 */
1812	leaf = btrfs_lock_root_node(root);
1813	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1814			      BTRFS_NESTING_COW);
1815	btrfs_tree_unlock(leaf);
1816	free_extent_buffer(leaf);
1817out:
1818	btrfs_free_path(path);
1819
1820	if (ret == 0) {
1821		ret = insert_dirty_subvol(trans, rc, root);
1822		if (ret)
1823			btrfs_abort_transaction(trans, ret);
1824	}
1825
1826	if (trans)
1827		btrfs_end_transaction_throttle(trans);
1828
1829	btrfs_btree_balance_dirty(fs_info);
1830
1831	if (replaced && rc->stage == UPDATE_DATA_PTRS)
1832		invalidate_extent_cache(root, &key, &next_key);
1833
1834	return ret;
1835}
1836
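/*
 * Prepare reloc trees for merging: reserve space for the merge, set the
 * reference count of each reloc root to 1 so a later mount knows merging
 * must be resumed, and commit the transaction to write the reloc trees out.
 */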
1837static noinline_for_stack
1838int prepare_to_merge(struct reloc_control *rc, int err)
1839{
1840	struct btrfs_root *root = rc->extent_root;
1841	struct btrfs_fs_info *fs_info = root->fs_info;
1842	struct btrfs_root *reloc_root;
1843	struct btrfs_trans_handle *trans;
1844	LIST_HEAD(reloc_roots);
1845	u64 num_bytes = 0;
1846	int ret;
1847
1848	mutex_lock(&fs_info->reloc_mutex);
1849	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1850	rc->merging_rsv_size += rc->nodes_relocated * 2;
1851	mutex_unlock(&fs_info->reloc_mutex);
1852
1853again:
1854	if (!err) {
1855		num_bytes = rc->merging_rsv_size;
1856		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
1857					  BTRFS_RESERVE_FLUSH_ALL);
1858		if (ret)
1859			err = ret;
1860	}
1861
1862	trans = btrfs_join_transaction(rc->extent_root);
1863	if (IS_ERR(trans)) {
1864		if (!err)
1865			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1866						num_bytes, NULL);
1867		return PTR_ERR(trans);
1868	}
1869
1870	if (!err) {
1871		if (num_bytes != rc->merging_rsv_size) {
1872			btrfs_end_transaction(trans);
1873			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1874						num_bytes, NULL);
1875			goto again;
1876		}
1877	}
1878
1879	rc->merge_reloc_tree = 1;
1880
1881	while (!list_empty(&rc->reloc_roots)) {
1882		reloc_root = list_entry(rc->reloc_roots.next,
1883					struct btrfs_root, root_list);
1884		list_del_init(&reloc_root->root_list);
1885
1886		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1887				false);
1888		if (IS_ERR(root)) {
1889			/*
1890			 * Even if we have an error we need this reloc root
1891			 * back on our list so we can clean up properly.
1892			 */
1893			list_add(&reloc_root->root_list, &reloc_roots);
1894			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
1895			if (!err)
1896				err = PTR_ERR(root);
1897			break;
1898		}
1899		ASSERT(root->reloc_root == reloc_root);
1900
1901		/*
1902		 * set reference count to 1, so btrfs_recover_relocation
1903		 * knows it should resume merging
1904		 */
1905		if (!err)
1906			btrfs_set_root_refs(&reloc_root->root_item, 1);
1907		ret = btrfs_update_reloc_root(trans, root);
1908
1909		/*
1910		 * Even if we have an error we need this reloc root back on our
1911		 * list so we can clean up properly.
1912		 */
1913		list_add(&reloc_root->root_list, &reloc_roots);
1914		btrfs_put_root(root);
1915
1916		if (ret) {
1917			btrfs_abort_transaction(trans, ret);
1918			if (!err)
1919				err = ret;
1920			break;
1921		}
1922	}
1923
1924	list_splice(&reloc_roots, &rc->reloc_roots);
1925
1926	if (!err)
1927		err = btrfs_commit_transaction(trans);
1928	else
1929		btrfs_end_transaction(trans);
1930	return err;
1931}
1932
1933static noinline_for_stack
1934void free_reloc_roots(struct list_head *list)
1935{
1936	struct btrfs_root *reloc_root, *tmp;
1937
1938	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1939		__del_reloc_root(reloc_root);
1940}
1941
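/*
 * Merge every reloc tree on reloc_control::reloc_roots into its
 * corresponding fs tree and queue the roots for cleanup.
 */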
1942static noinline_for_stack
1943void merge_reloc_roots(struct reloc_control *rc)
1944{
1945	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1946	struct btrfs_root *root;
1947	struct btrfs_root *reloc_root;
1948	LIST_HEAD(reloc_roots);
1949	int found = 0;
1950	int ret = 0;
1951again:
1952	root = rc->extent_root;
1953
1954	/*
1955	 * this serializes us with btrfs_record_root_in_transaction;
1956	 * we have to make sure nobody is in the middle of
1957	 * adding their roots to the list while we are
1958	 * doing this splice
1959	 */
1960	mutex_lock(&fs_info->reloc_mutex);
1961	list_splice_init(&rc->reloc_roots, &reloc_roots);
1962	mutex_unlock(&fs_info->reloc_mutex);
1963
1964	while (!list_empty(&reloc_roots)) {
1965		found = 1;
1966		reloc_root = list_entry(reloc_roots.next,
1967					struct btrfs_root, root_list);
1968
1969		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1970					 false);
1971		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1972			if (IS_ERR(root)) {
1973				/*
1974				 * For recovery we read the fs roots on mount,
1975				 * and if we didn't find the root then we marked
1976				 * the reloc root as a garbage root.  For normal
1977				 * relocation obviously the root should exist in
1978				 * memory.  However there's no reason we can't
1979				 * handle the error properly here just in case.
1980				 */
1981				ASSERT(0);
1982				ret = PTR_ERR(root);
1983				goto out;
1984			}
1985			if (root->reloc_root != reloc_root) {
1986				/*
1987				 * This is actually impossible without something
1988				 * going really wrong (like weird race condition
1989				 * or cosmic rays).
1990				 */
1991				ASSERT(0);
1992				ret = -EINVAL;
1993				goto out;
1994			}
1995			ret = merge_reloc_root(rc, root);
1996			btrfs_put_root(root);
1997			if (ret) {
1998				if (list_empty(&reloc_root->root_list))
1999					list_add_tail(&reloc_root->root_list,
2000						      &reloc_roots);
2001				goto out;
2002			}
2003		} else {
2004			if (!IS_ERR(root)) {
2005				if (root->reloc_root == reloc_root) {
2006					root->reloc_root = NULL;
2007					btrfs_put_root(reloc_root);
2008				}
2009				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
2010					  &root->state);
2011				btrfs_put_root(root);
2012			}
2013
2014			list_del_init(&reloc_root->root_list);
2015			/* Don't forget to queue this reloc root for cleanup */
2016			list_add_tail(&reloc_root->reloc_dirty_list,
2017				      &rc->dirty_subvol_roots);
2018		}
2019	}
2020
2021	if (found) {
2022		found = 0;
2023		goto again;
2024	}
2025out:
2026	if (ret) {
2027		btrfs_handle_fs_error(fs_info, ret, NULL);
2028		free_reloc_roots(&reloc_roots);
2029
2030		/* new reloc root may be added */
2031		mutex_lock(&fs_info->reloc_mutex);
2032		list_splice_init(&rc->reloc_roots, &reloc_roots);
2033		mutex_unlock(&fs_info->reloc_mutex);
2034		free_reloc_roots(&reloc_roots);
2035	}
2036
2037	/*
2038	 * We used to have
2039	 *
2040	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2041	 *
2042	 * here, but it's wrong.  If we fail to start the transaction in
2043	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
2044	 * have actually been removed from the reloc_root_tree rb tree.  This is
2045	 * fine because we're bailing here, and we hold a reference on the root
2046	 * for the list that holds it, so these roots will be cleaned up when we
2047	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
2048	 * will be cleaned up on unmount.
2049	 *
2050	 * The remaining nodes will be cleaned up by free_reloc_control.
2051	 */
2052}
2053
2054static void free_block_list(struct rb_root *blocks)
2055{
2056	struct tree_block *block;
2057	struct rb_node *rb_node;
2058	while ((rb_node = rb_first(blocks))) {
2059		block = rb_entry(rb_node, struct tree_block, rb_node);
2060		rb_erase(rb_node, blocks);
2061		kfree(block);
2062	}
2063}
2064
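/*
 * Look up the subvolume root that owns @reloc_root and record it in the
 * current transaction, so the reloc root is kept up to date.
 */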
2065static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2066				      struct btrfs_root *reloc_root)
2067{
2068	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2069	struct btrfs_root *root;
2070	int ret;
2071
2072	if (reloc_root->last_trans == trans->transid)
2073		return 0;
2074
2075	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2076
2077	/*
2078	 * This should succeed, since we can't have a reloc root without having
2079	 * already looked up the actual root and created the reloc root for this
2080	 * root.
2081	 *
2082	 * However if there's some sort of corruption where we have a ref to a
2083	 * reloc root without a corresponding root this could return ENOENT.
2084	 */
2085	if (IS_ERR(root)) {
2086		ASSERT(0);
2087		return PTR_ERR(root);
2088	}
2089	if (root->reloc_root != reloc_root) {
2090		ASSERT(0);
2091		btrfs_err(fs_info,
2092			  "root %llu has two reloc roots associated with it",
2093			  reloc_root->root_key.offset);
2094		btrfs_put_root(root);
2095		return -EUCLEAN;
2096	}
2097	ret = btrfs_record_root_in_trans(trans, root);
2098	btrfs_put_root(root);
2099
2100	return ret;
2101}
2102
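/*
 * Walk up the backref tree from @node until a block owned by a tree root is
 * found, record that root in the current transaction and return the matching
 * reloc root.  The backref cache path used by btrfs_reloc_cow_block() is set
 * up along the way.
 */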
2103static noinline_for_stack
2104struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2105				     struct reloc_control *rc,
2106				     struct btrfs_backref_node *node,
2107				     struct btrfs_backref_edge *edges[])
2108{
2109	struct btrfs_backref_node *next;
2110	struct btrfs_root *root;
2111	int index = 0;
2112	int ret;
2113
2114	next = node;
2115	while (1) {
2116		cond_resched();
2117		next = walk_up_backref(next, edges, &index);
2118		root = next->root;
2119
2120		/*
2121		 * If there is no root, then our references for this block are
2122		 * incomplete, as we should be able to walk all the way up to a
2123		 * block that is owned by a root.
2124		 *
2125		 * This path is only for SHAREABLE roots, so if we come upon a
2126		 * non-SHAREABLE root then we have backrefs that resolve
2127		 * improperly.
2128		 *
2129		 * Both of these cases indicate file system corruption, or a bug
2130		 * in the backref walking code.
2131		 */
2132		if (!root) {
2133			ASSERT(0);
2134			btrfs_err(trans->fs_info,
2135		"bytenr %llu doesn't have a backref path ending in a root",
2136				  node->bytenr);
2137			return ERR_PTR(-EUCLEAN);
2138		}
2139		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2140			ASSERT(0);
2141			btrfs_err(trans->fs_info,
2142	"bytenr %llu has multiple refs with one ending in a non-shareable root",
2143				  node->bytenr);
2144			return ERR_PTR(-EUCLEAN);
2145		}
2146
2147		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2148			ret = record_reloc_root_in_trans(trans, root);
2149			if (ret)
2150				return ERR_PTR(ret);
2151			break;
2152		}
2153
2154		ret = btrfs_record_root_in_trans(trans, root);
2155		if (ret)
2156			return ERR_PTR(ret);
2157		root = root->reloc_root;
2158
2159		/*
2160		 * We could have raced with another thread which failed, so
2161		 * root->reloc_root may not be set, return ENOENT in this case.
2162		 */
2163		if (!root)
2164			return ERR_PTR(-ENOENT);
2165
2166		if (next->new_bytenr != root->node->start) {
2167			/*
2168			 * We just created the reloc root, so we shouldn't have
2169			 * ->new_bytenr set and this shouldn't be in the changed
2170			 *  list.  If it is then we have multiple roots pointing
2171			 *  at the same bytenr which indicates corruption, or
2172			 *  we've made a mistake in the backref walking code.
2173			 */
2174			ASSERT(next->new_bytenr == 0);
2175			ASSERT(list_empty(&next->list));
2176			if (next->new_bytenr || !list_empty(&next->list)) {
2177				btrfs_err(trans->fs_info,
2178	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
2179					  node->bytenr, next->bytenr);
2180				return ERR_PTR(-EUCLEAN);
2181			}
2182
2183			next->new_bytenr = root->node->start;
2184			btrfs_put_root(next->root);
2185			next->root = btrfs_grab_root(root);
2186			ASSERT(next->root);
2187			list_add_tail(&next->list,
2188				      &rc->backref_cache.changed);
2189			mark_block_processed(rc, next);
2190			break;
2191		}
2192
2193		WARN_ON(1);
2194		root = NULL;
2195		next = walk_down_backref(edges, &index);
2196		if (!next || next->level <= node->level)
2197			break;
2198	}
2199	if (!root) {
2200		/*
2201		 * This can happen if there's fs corruption or if there's a bug
2202		 * in the backref lookup code.
2203		 */
2204		ASSERT(0);
2205		return ERR_PTR(-ENOENT);
2206	}
2207
2208	next = node;
2209	/* setup backref node path for btrfs_reloc_cow_block */
2210	while (1) {
2211		rc->backref_cache.path[next->level] = next;
2212		if (--index < 0)
2213			break;
2214		next = edges[index]->node[UPPER];
2215	}
2216	return root;
2217}
2218
2219/*
2220 * Select a tree root for relocation.
2221 *
2222 * Return NULL if the block is not shareable. We should use do_relocation() in
2223 * this case.
2224 *
2225 * Return a tree root pointer if the block is shareable.
2226 * Return -ENOENT if the block is the root of a reloc tree.
2227 */
2228static noinline_for_stack
2229struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2230{
2231	struct btrfs_backref_node *next;
2232	struct btrfs_root *root;
2233	struct btrfs_root *fs_root = NULL;
2234	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2235	int index = 0;
2236
2237	next = node;
2238	while (1) {
2239		cond_resched();
2240		next = walk_up_backref(next, edges, &index);
2241		root = next->root;
2242
2243		/*
2244		 * This can occur if we have incomplete extent refs leading all
2245		 * the way up a particular path, in this case return -EUCLEAN.
2246		 */
2247		if (!root)
2248			return ERR_PTR(-EUCLEAN);
2249
2250		/* No other choice for non-shareable tree */
2251		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2252			return root;
2253
2254		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2255			fs_root = root;
2256
2257		if (next != node)
2258			return NULL;
2259
2260		next = walk_down_backref(edges, &index);
2261		if (!next || next->level <= node->level)
2262			break;
2263	}
2264
2265	if (!fs_root)
2266		return ERR_PTR(-ENOENT);
2267	return fs_root;
2268}
2269
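/*
 * Calculate the metadata space needed to process @node: one nodesize for the
 * node itself plus one for every unprocessed block above it in the backref
 * tree.
 */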
2270static noinline_for_stack
2271u64 calcu_metadata_size(struct reloc_control *rc,
2272			struct btrfs_backref_node *node, int reserve)
2273{
2274	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2275	struct btrfs_backref_node *next = node;
2276	struct btrfs_backref_edge *edge;
2277	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2278	u64 num_bytes = 0;
2279	int index = 0;
2280
2281	BUG_ON(reserve && node->processed);
2282
2283	while (next) {
2284		cond_resched();
2285		while (1) {
2286			if (next->processed && (reserve || next != node))
2287				break;
2288
2289			num_bytes += fs_info->nodesize;
2290
2291			if (list_empty(&next->upper))
2292				break;
2293
2294			edge = list_entry(next->upper.next,
2295					struct btrfs_backref_edge, list[LOWER]);
2296			edges[index++] = edge;
2297			next = edge->node[UPPER];
2298		}
2299		next = walk_down_backref(edges, &index);
2300	}
2301	return num_bytes;
2302}
2303
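/*
 * Reserve metadata space for relocating @node and the blocks above it.
 * Returns -EAGAIN if the reservation cannot be refilled with limited
 * flushing, after enlarging the block reservation for the retry.
 */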
2304static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2305				  struct reloc_control *rc,
2306				  struct btrfs_backref_node *node)
2307{
2308	struct btrfs_root *root = rc->extent_root;
2309	struct btrfs_fs_info *fs_info = root->fs_info;
2310	u64 num_bytes;
2311	int ret;
2312	u64 tmp;
2313
2314	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2315
2316	trans->block_rsv = rc->block_rsv;
2317	rc->reserved_bytes += num_bytes;
2318
2319	/*
2320	 * We are under a transaction here so we can only do limited flushing.
2321	 * If we get an enospc just kick back -EAGAIN so we know to drop the
2322	 * transaction and try to refill when we can flush all the things.
2323	 */
2324	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
2325				BTRFS_RESERVE_FLUSH_LIMIT);
2326	if (ret) {
2327		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2328		while (tmp <= rc->reserved_bytes)
2329			tmp <<= 1;
2330		/*
2331		 * only one thread can access block_rsv at this point,
2332		 * so we don't need to hold a lock to protect block_rsv.
2333		 * we expand the reservation size here to allow enough
2334		 * space for relocation and we will return early in the
2335		 * enospc case.
2336		 */
2337		rc->block_rsv->size = tmp + fs_info->nodesize *
2338				      RELOCATION_RESERVED_NODES;
2339		return -EAGAIN;
2340	}
2341
2342	return 0;
2343}
2344
2345/*
2346 * relocate a block tree, and then update pointers in upper level
2347 * blocks that reference the block to point to the new location.
2348 *
2349 * if called by link_to_upper, the block has already been relocated.
2350 * in that case this function just updates pointers.
2351 */
2352static int do_relocation(struct btrfs_trans_handle *trans,
2353			 struct reloc_control *rc,
2354			 struct btrfs_backref_node *node,
2355			 struct btrfs_key *key,
2356			 struct btrfs_path *path, int lowest)
2357{
2358	struct btrfs_backref_node *upper;
2359	struct btrfs_backref_edge *edge;
2360	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2361	struct btrfs_root *root;
2362	struct extent_buffer *eb;
2363	u32 blocksize;
2364	u64 bytenr;
2365	int slot;
2366	int ret = 0;
2367
2368	/*
2369	 * If we are lowest then this is the first time we're processing this
2370	 * block, and thus shouldn't have an eb associated with it yet.
2371	 */
2372	ASSERT(!lowest || !node->eb);
2373
2374	path->lowest_level = node->level + 1;
2375	rc->backref_cache.path[node->level] = node;
2376	list_for_each_entry(edge, &node->upper, list[LOWER]) {
2377		struct btrfs_ref ref = { 0 };
2378
2379		cond_resched();
2380
2381		upper = edge->node[UPPER];
2382		root = select_reloc_root(trans, rc, upper, edges);
2383		if (IS_ERR(root)) {
2384			ret = PTR_ERR(root);
2385			goto next;
2386		}
2387
2388		if (upper->eb && !upper->locked) {
2389			if (!lowest) {
2390				ret = btrfs_bin_search(upper->eb, key, &slot);
2391				if (ret < 0)
2392					goto next;
2393				BUG_ON(ret);
2394				bytenr = btrfs_node_blockptr(upper->eb, slot);
2395				if (node->eb->start == bytenr)
2396					goto next;
2397			}
2398			btrfs_backref_drop_node_buffer(upper);
2399		}
2400
2401		if (!upper->eb) {
2402			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2403			if (ret) {
2404				if (ret > 0)
2405					ret = -ENOENT;
2406
2407				btrfs_release_path(path);
2408				break;
2409			}
2410
2411			if (!upper->eb) {
2412				upper->eb = path->nodes[upper->level];
2413				path->nodes[upper->level] = NULL;
2414			} else {
2415				BUG_ON(upper->eb != path->nodes[upper->level]);
2416			}
2417
2418			upper->locked = 1;
2419			path->locks[upper->level] = 0;
2420
2421			slot = path->slots[upper->level];
2422			btrfs_release_path(path);
2423		} else {
2424			ret = btrfs_bin_search(upper->eb, key, &slot);
2425			if (ret < 0)
2426				goto next;
2427			BUG_ON(ret);
2428		}
2429
2430		bytenr = btrfs_node_blockptr(upper->eb, slot);
2431		if (lowest) {
2432			if (bytenr != node->bytenr) {
2433				btrfs_err(root->fs_info,
2434		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2435					  bytenr, node->bytenr, slot,
2436					  upper->eb->start);
2437				ret = -EIO;
2438				goto next;
2439			}
2440		} else {
2441			if (node->eb->start == bytenr)
2442				goto next;
2443		}
2444
2445		blocksize = root->fs_info->nodesize;
2446		eb = btrfs_read_node_slot(upper->eb, slot);
2447		if (IS_ERR(eb)) {
2448			ret = PTR_ERR(eb);
2449			goto next;
2450		}
2451		btrfs_tree_lock(eb);
2452
2453		if (!node->eb) {
2454			ret = btrfs_cow_block(trans, root, eb, upper->eb,
2455					      slot, &eb, BTRFS_NESTING_COW);
2456			btrfs_tree_unlock(eb);
2457			free_extent_buffer(eb);
2458			if (ret < 0)
2459				goto next;
2460			/*
2461			 * We've just COWed this block, it should have updated
2462			 * the correct backref node entry.
2463			 */
2464			ASSERT(node->eb == eb);
2465		} else {
2466			btrfs_set_node_blockptr(upper->eb, slot,
2467						node->eb->start);
2468			btrfs_set_node_ptr_generation(upper->eb, slot,
2469						      trans->transid);
2470			btrfs_mark_buffer_dirty(upper->eb);
2471
2472			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2473					       node->eb->start, blocksize,
2474					       upper->eb->start);
2475			ref.real_root = root->root_key.objectid;
2476			btrfs_init_tree_ref(&ref, node->level,
2477					    btrfs_header_owner(upper->eb));
2478			ret = btrfs_inc_extent_ref(trans, &ref);
2479			if (!ret)
2480				ret = btrfs_drop_subtree(trans, root, eb,
2481							 upper->eb);
2482			if (ret)
2483				btrfs_abort_transaction(trans, ret);
2484		}
2485next:
2486		if (!upper->pending)
2487			btrfs_backref_drop_node_buffer(upper);
2488		else
2489			btrfs_backref_unlock_node_buffer(upper);
2490		if (ret)
2491			break;
2492	}
2493
2494	if (!ret && node->pending) {
2495		btrfs_backref_drop_node_buffer(node);
2496		list_move_tail(&node->list, &rc->backref_cache.changed);
2497		node->pending = 0;
2498	}
2499
2500	path->lowest_level = 0;
2501
2502	/*
2503	 * We should have allocated all of our space in the block rsv and thus
2504	 * shouldn't ENOSPC.
2505	 */
2506	ASSERT(ret != -ENOSPC);
2507	return ret;
2508}
2509
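/*
 * The block of @node has already been relocated, only update the pointers in
 * the upper level blocks that reference it.
 */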
2510static int link_to_upper(struct btrfs_trans_handle *trans,
2511			 struct reloc_control *rc,
2512			 struct btrfs_backref_node *node,
2513			 struct btrfs_path *path)
2514{
2515	struct btrfs_key key;
2516
2517	btrfs_node_key_to_cpu(node->eb, &key, 0);
2518	return do_relocation(trans, rc, node, &key, path, 0);
2519}
2520
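/*
 * Link all pending backref nodes to their upper level blocks, returning the
 * first error encountered (or @err if it was already set).
 */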
2521static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2522				struct reloc_control *rc,
2523				struct btrfs_path *path, int err)
2524{
2525	LIST_HEAD(list);
2526	struct btrfs_backref_cache *cache = &rc->backref_cache;
2527	struct btrfs_backref_node *node;
2528	int level;
2529	int ret;
2530
2531	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2532		while (!list_empty(&cache->pending[level])) {
2533			node = list_entry(cache->pending[level].next,
2534					  struct btrfs_backref_node, list);
2535			list_move_tail(&node->list, &list);
2536			BUG_ON(!node->pending);
2537
2538			if (!err) {
2539				ret = link_to_upper(trans, rc, node, path);
2540				if (ret < 0)
2541					err = ret;
2542			}
2543		}
2544		list_splice_init(&list, &cache->pending[level]);
2545	}
2546	return err;
2547}
2548
2549/*
2550 * mark a block and all blocks that directly/indirectly reference the block
2551 * as processed.
2552 */
2553static void update_processed_blocks(struct reloc_control *rc,
2554				    struct btrfs_backref_node *node)
2555{
2556	struct btrfs_backref_node *next = node;
2557	struct btrfs_backref_edge *edge;
2558	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2559	int index = 0;
2560
2561	while (next) {
2562		cond_resched();
2563		while (1) {
2564			if (next->processed)
2565				break;
2566
2567			mark_block_processed(rc, next);
2568
2569			if (list_empty(&next->upper))
2570				break;
2571
2572			edge = list_entry(next->upper.next,
2573					struct btrfs_backref_edge, list[LOWER]);
2574			edges[index++] = edge;
2575			next = edge->node[UPPER];
2576		}
2577		next = walk_down_backref(edges, &index);
2578	}
2579}
2580
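/* Return 1 if the tree block at @bytenr has already been processed. */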
2581static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2582{
2583	u32 blocksize = rc->extent_root->fs_info->nodesize;
2584
2585	if (test_range_bit(&rc->processed_blocks, bytenr,
2586			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2587		return 1;
2588	return 0;
2589}
2590
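/*
 * Read the tree block described by @block and record its first key, marking
 * the key as ready for the later backref and relocation steps.
 */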
2591static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2592			      struct tree_block *block)
2593{
2594	struct extent_buffer *eb;
2595
2596	eb = read_tree_block(fs_info, block->bytenr, block->owner,
2597			     block->key.offset, block->level, NULL);
2598	if (IS_ERR(eb)) {
2599		return PTR_ERR(eb);
2600	} else if (!extent_buffer_uptodate(eb)) {
2601		free_extent_buffer(eb);
2602		return -EIO;
2603	}
2604	if (block->level == 0)
2605		btrfs_item_key_to_cpu(eb, &block->key, 0);
2606	else
2607		btrfs_node_key_to_cpu(eb, &block->key, 0);
2608	free_extent_buffer(eb);
2609	block->key_ready = 1;
2610	return 0;
2611}
2612
2613/*
2614 * helper function to relocate a tree block
2615 */
2616static int relocate_tree_block(struct btrfs_trans_handle *trans,
2617				struct reloc_control *rc,
2618				struct btrfs_backref_node *node,
2619				struct btrfs_key *key,
2620				struct btrfs_path *path)
2621{
2622	struct btrfs_root *root;
2623	int ret = 0;
2624
2625	if (!node)
2626		return 0;
2627
2628	/*
2629	 * If we fail here we want to drop our backref_node because we are going
2630	 * to start over and regenerate the tree for it.
2631	 */
2632	ret = reserve_metadata_space(trans, rc, node);
2633	if (ret)
2634		goto out;
2635
2636	BUG_ON(node->processed);
2637	root = select_one_root(node);
2638	if (IS_ERR(root)) {
2639		ret = PTR_ERR(root);
2640
2641		/* See explanation in select_one_root for the -EUCLEAN case. */
2642		ASSERT(ret == -ENOENT);
2643		if (ret == -ENOENT) {
2644			ret = 0;
2645			update_processed_blocks(rc, node);
2646		}
2647		goto out;
2648	}
2649
2650	if (root) {
2651		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2652			/*
2653			 * This block was the root block of a root, and this is
2654			 * the first time we're processing the block and thus it
2655			 * should not have had the ->new_bytenr modified and
2656			 * should have not been included on the changed list.
2657			 *
2658			 * However in the case of corruption we could have
2659			 * multiple refs pointing to the same block improperly,
2660			 * and thus we would trip over these checks.  ASSERT()
2661			 * for the developer case, because it could indicate a
2662			 * bug in the backref code, however error out for a
2663			 * normal user in the case of corruption.
2664			 */
2665			ASSERT(node->new_bytenr == 0);
2666			ASSERT(list_empty(&node->list));
2667			if (node->new_bytenr || !list_empty(&node->list)) {
2668				btrfs_err(root->fs_info,
2669				  "bytenr %llu has improper references to it",
2670					  node->bytenr);
2671				ret = -EUCLEAN;
2672				goto out;
2673			}
2674			ret = btrfs_record_root_in_trans(trans, root);
2675			if (ret)
2676				goto out;
2677			/*
2678			 * Another thread could have failed, need to check if we
2679			 * have reloc_root actually set.
2680			 */
2681			if (!root->reloc_root) {
2682				ret = -ENOENT;
2683				goto out;
2684			}
2685			root = root->reloc_root;
2686			node->new_bytenr = root->node->start;
2687			btrfs_put_root(node->root);
2688			node->root = btrfs_grab_root(root);
2689			ASSERT(node->root);
2690			list_add_tail(&node->list, &rc->backref_cache.changed);
2691		} else {
2692			path->lowest_level = node->level;
2693			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2694			btrfs_release_path(path);
2695			if (ret > 0)
2696				ret = 0;
2697		}
2698		if (!ret)
2699			update_processed_blocks(rc, node);
2700	} else {
2701		ret = do_relocation(trans, rc, node, key, path, 1);
2702	}
2703out:
2704	if (ret || node->level == 0 || node->cowonly)
2705		btrfs_backref_cleanup_node(&rc->backref_cache, node);
2706	return ret;
2707}
2708
2709/*
2710 * relocate a list of blocks
2711 */
2712static noinline_for_stack
2713int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2714			 struct reloc_control *rc, struct rb_root *blocks)
2715{
2716	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2717	struct btrfs_backref_node *node;
2718	struct btrfs_path *path;
2719	struct tree_block *block;
2720	struct tree_block *next;
2721	int ret;
2722	int err = 0;
2723
2724	path = btrfs_alloc_path();
2725	if (!path) {
2726		err = -ENOMEM;
2727		goto out_free_blocks;
2728	}
2729
2730	/* Kick in readahead for tree blocks with missing keys */
2731	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2732		if (!block->key_ready)
2733			btrfs_readahead_tree_block(fs_info, block->bytenr,
2734						   block->owner, 0,
2735						   block->level);
2736	}
2737
2738	/* Get first keys */
2739	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2740		if (!block->key_ready) {
2741			err = get_tree_block_key(fs_info, block);
2742			if (err)
2743				goto out_free_path;
2744		}
2745	}
2746
2747	/* Do tree relocation */
2748	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2749		node = build_backref_tree(rc, &block->key,
2750					  block->level, block->bytenr);
2751		if (IS_ERR(node)) {
2752			err = PTR_ERR(node);
2753			goto out;
2754		}
2755
2756		ret = relocate_tree_block(trans, rc, node, &block->key,
2757					  path);
2758		if (ret < 0) {
2759			err = ret;
2760			break;
2761		}
2762	}
2763out:
2764	err = finish_pending_nodes(trans, rc, path, err);
2765
2766out_free_path:
2767	btrfs_free_path(path);
2768out_free_blocks:
2769	free_block_list(blocks);
2770	return err;
2771}
2772
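/*
 * Preallocate file extents in the data relocation inode covering the whole
 * cluster, so the relocated data has space at its new location.  On zoned
 * filesystems the region is not preallocated, only i_size is extended.
 */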
2773static noinline_for_stack int prealloc_file_extent_cluster(
2774				struct btrfs_inode *inode,
2775				struct file_extent_cluster *cluster)
2776{
2777	u64 alloc_hint = 0;
2778	u64 start;
2779	u64 end;
2780	u64 offset = inode->index_cnt;
2781	u64 num_bytes;
2782	int nr;
2783	int ret = 0;
2784	u64 prealloc_start = cluster->start - offset;
2785	u64 prealloc_end = cluster->end - offset;
2786	u64 cur_offset = prealloc_start;
2787
2788	BUG_ON(cluster->start != cluster->boundary[0]);
2789	ret = btrfs_alloc_data_chunk_ondemand(inode,
2790					      prealloc_end + 1 - prealloc_start);
2791	if (ret)
2792		return ret;
2793
2794	/*
2795	 * On a zoned filesystem, we cannot preallocate the file region.
2796	 * Instead, we dirty and fiemap_write the region.
2797	 */
2798	if (btrfs_is_zoned(inode->root->fs_info)) {
2799		struct btrfs_root *root = inode->root;
2800		struct btrfs_trans_handle *trans;
2801
2802		end = cluster->end - offset + 1;
2803		trans = btrfs_start_transaction(root, 1);
2804		if (IS_ERR(trans))
2805			return PTR_ERR(trans);
2806
2807		inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
2808		i_size_write(&inode->vfs_inode, end);
2809		ret = btrfs_update_inode(trans, root, inode);
2810		if (ret) {
2811			btrfs_abort_transaction(trans, ret);
2812			btrfs_end_transaction(trans);
2813			return ret;
2814		}
2815
2816		return btrfs_end_transaction(trans);
2817	}
2818
2819	btrfs_inode_lock(&inode->vfs_inode, 0);
2820	for (nr = 0; nr < cluster->nr; nr++) {
2821		start = cluster->boundary[nr] - offset;
2822		if (nr + 1 < cluster->nr)
2823			end = cluster->boundary[nr + 1] - 1 - offset;
2824		else
2825			end = cluster->end - offset;
2826
2827		lock_extent(&inode->io_tree, start, end);
2828		num_bytes = end + 1 - start;
2829		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2830						num_bytes, num_bytes,
2831						end + 1, &alloc_hint);
2832		cur_offset = end + 1;
2833		unlock_extent(&inode->io_tree, start, end);
2834		if (ret)
2835			break;
2836	}
2837	btrfs_inode_unlock(&inode->vfs_inode, 0);
2838
2839	if (cur_offset < prealloc_end)
2840		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2841					       prealloc_end + 1 - cur_offset);
2842	return ret;
2843}
2844
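/*
 * Insert a pinned extent map for [start, end] pointing at @block_start,
 * dropping any cached extent maps that overlap the range.
 */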
2845static noinline_for_stack
2846int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2847			 u64 block_start)
2848{
2849	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2850	struct extent_map *em;
2851	int ret = 0;
2852
2853	em = alloc_extent_map();
2854	if (!em)
2855		return -ENOMEM;
2856
2857	em->start = start;
2858	em->len = end + 1 - start;
2859	em->block_len = em->len;
2860	em->block_start = block_start;
2861	set_bit(EXTENT_FLAG_PINNED, &em->flags);
2862
2863	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2864	while (1) {
2865		write_lock(&em_tree->lock);
2866		ret = add_extent_mapping(em_tree, em, 0);
2867		write_unlock(&em_tree->lock);
2868		if (ret != -EEXIST) {
2869			free_extent_map(em);
2870			break;
2871		}
2872		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
2873	}
2874	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2875	return ret;
2876}
2877
2878/*
2879 * Allow error injection to test balance/relocation cancellation
2880 */
2881noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2882{
2883	return atomic_read(&fs_info->balance_cancel_req) ||
2884		atomic_read(&fs_info->reloc_cancel_req) ||
2885		fatal_signal_pending(current);
2886}
2887ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2888
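/*
 * Relocate a cluster of data extents: preallocate the new space, set up the
 * extent mapping, then read and dirty every page of the cluster so writeback
 * copies the data to its new location.
 */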
2889static int relocate_file_extent_cluster(struct inode *inode,
2890					struct file_extent_cluster *cluster)
2891{
2892	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2893	u64 page_start;
2894	u64 page_end;
2895	u64 offset = BTRFS_I(inode)->index_cnt;
2896	unsigned long index;
2897	unsigned long last_index;
2898	struct page *page;
2899	struct file_ra_state *ra;
2900	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2901	int nr = 0;
2902	int ret = 0;
2903
2904	if (!cluster->nr)
2905		return 0;
2906
2907	ra = kzalloc(sizeof(*ra), GFP_NOFS);
2908	if (!ra)
2909		return -ENOMEM;
2910
2911	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
2912	if (ret)
2913		goto out;
2914
2915	file_ra_state_init(ra, inode->i_mapping);
2916
2917	ret = setup_extent_mapping(inode, cluster->start - offset,
2918				   cluster->end - offset, cluster->start);
2919	if (ret)
2920		goto out;
2921
2922	index = (cluster->start - offset) >> PAGE_SHIFT;
2923	last_index = (cluster->end - offset) >> PAGE_SHIFT;
2924	while (index <= last_index) {
2925		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2926				PAGE_SIZE);
2927		if (ret)
2928			goto out;
2929
2930		page = find_lock_page(inode->i_mapping, index);
2931		if (!page) {
2932			page_cache_sync_readahead(inode->i_mapping,
2933						  ra, NULL, index,
2934						  last_index + 1 - index);
2935			page = find_or_create_page(inode->i_mapping, index,
2936						   mask);
2937			if (!page) {
2938				btrfs_delalloc_release_metadata(BTRFS_I(inode),
2939							PAGE_SIZE, true);
2940				btrfs_delalloc_release_extents(BTRFS_I(inode),
2941							PAGE_SIZE);
2942				ret = -ENOMEM;
2943				goto out;
2944			}
2945		}
2946		ret = set_page_extent_mapped(page);
2947		if (ret < 0) {
2948			btrfs_delalloc_release_metadata(BTRFS_I(inode),
2949							PAGE_SIZE, true);
2950			btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2951			unlock_page(page);
2952			put_page(page);
2953			goto out;
2954		}
2955
2956		if (PageReadahead(page)) {
2957			page_cache_async_readahead(inode->i_mapping,
2958						   ra, NULL, page, index,
2959						   last_index + 1 - index);
2960		}
2961
2962		if (!PageUptodate(page)) {
2963			btrfs_readpage(NULL, page);
2964			lock_page(page);
2965			if (!PageUptodate(page)) {
2966				unlock_page(page);
2967				put_page(page);
2968				btrfs_delalloc_release_metadata(BTRFS_I(inode),
2969							PAGE_SIZE, true);
2970				btrfs_delalloc_release_extents(BTRFS_I(inode),
2971							       PAGE_SIZE);
2972				ret = -EIO;
2973				goto out;
2974			}
2975		}
2976
2977		page_start = page_offset(page);
2978		page_end = page_start + PAGE_SIZE - 1;
2979
2980		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
2981
2982		if (nr < cluster->nr &&
2983		    page_start + offset == cluster->boundary[nr]) {
2984			set_extent_bits(&BTRFS_I(inode)->io_tree,
2985					page_start, page_end,
2986					EXTENT_BOUNDARY);
2987			nr++;
2988		}
2989
2990		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start,
2991						page_end, 0, NULL);
2992		if (ret) {
2993			unlock_page(page);
2994			put_page(page);
2995			btrfs_delalloc_release_metadata(BTRFS_I(inode),
2996							 PAGE_SIZE, true);
2997			btrfs_delalloc_release_extents(BTRFS_I(inode),
2998			                               PAGE_SIZE);
2999
3000			clear_extent_bits(&BTRFS_I(inode)->io_tree,
3001					  page_start, page_end,
3002					  EXTENT_LOCKED | EXTENT_BOUNDARY);
3003			goto out;
3004
3005		}
3006		set_page_dirty(page);
3007
3008		unlock_extent(&BTRFS_I(inode)->io_tree,
3009			      page_start, page_end);
3010		unlock_page(page);
3011		put_page(page);
3012
3013		index++;
3014		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
3015		balance_dirty_pages_ratelimited(inode->i_mapping);
3016		btrfs_throttle(fs_info);
3017		if (btrfs_should_cancel_balance(fs_info)) {
3018			ret = -ECANCELED;
3019			goto out;
3020		}
3021	}
3022	WARN_ON(nr != cluster->nr);
3023	if (btrfs_is_zoned(fs_info) && !ret)
3024		ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
3025out:
3026	kfree(ra);
3027	return ret;
3028}
3029
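/*
 * Add the data extent described by @extent_key to the current cluster,
 * relocating the cluster first whenever it is full or the new extent is not
 * contiguous with it.
 */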
3030static noinline_for_stack
3031int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3032			 struct file_extent_cluster *cluster)
3033{
3034	int ret;
3035
3036	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3037		ret = relocate_file_extent_cluster(inode, cluster);
3038		if (ret)
3039			return ret;
3040		cluster->nr = 0;
3041	}
3042
3043	if (!cluster->nr)
3044		cluster->start = extent_key->objectid;
3045	else
3046		BUG_ON(cluster->nr >= MAX_EXTENTS);
3047	cluster->end = extent_key->objectid + extent_key->offset - 1;
3048	cluster->boundary[cluster->nr] = extent_key->objectid;
3049	cluster->nr++;
3050
3051	if (cluster->nr >= MAX_EXTENTS) {
3052		ret = relocate_file_extent_cluster(inode, cluster);
3053		if (ret)
3054			return ret;
3055		cluster->nr = 0;
3056	}
3057	return 0;
3058}
3059
3060/*
3061 * helper to add a tree block to the list.
3062 * the major work is getting the generation and level of the block
3063 */
3064static int add_tree_block(struct reloc_control *rc,
3065			  struct btrfs_key *extent_key,
3066			  struct btrfs_path *path,
3067			  struct rb_root *blocks)
3068{
3069	struct extent_buffer *eb;
3070	struct btrfs_extent_item *ei;
3071	struct btrfs_tree_block_info *bi;
3072	struct tree_block *block;
3073	struct rb_node *rb_node;
3074	u32 item_size;
3075	int level = -1;
3076	u64 generation;
3077	u64 owner = 0;
3078
3079	eb = path->nodes[0];
3080	item_size = btrfs_item_size_nr(eb, path->slots[0]);
3081
3082	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3083	    item_size >= sizeof(*ei) + sizeof(*bi)) {
3084		unsigned long ptr = 0, end;
3085
3086		ei = btrfs_item_ptr(eb, path->slots[0],
3087				struct btrfs_extent_item);
3088		end = (unsigned long)ei + item_size;
3089		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3090			bi = (struct btrfs_tree_block_info *)(ei + 1);
3091			level = btrfs_tree_block_level(eb, bi);
3092			ptr = (unsigned long)(bi + 1);
3093		} else {
3094			level = (int)extent_key->offset;
3095			ptr = (unsigned long)(ei + 1);
3096		}
3097		generation = btrfs_extent_generation(eb, ei);
3098
3099		/*
3100		 * We're reading random blocks without knowing their owner ahead
3101		 * of time.  This is ok most of the time, as all reloc roots and
3102		 * fs roots have the same lock type.  However normal trees do
3103		 * not, and the only way to know ahead of time is to read the
3104		 * inline ref offset.  We know it's an fs root if
3105		 *
3106		 * 1. There's more than one ref.
3107		 * 2. There's a SHARED_DATA_REF_KEY set.
3108		 * 3. FULL_BACKREF is set on the flags.
3109		 *
3110		 * Otherwise it's safe to assume that the ref offset == the
3111		 * owner of this block, so we can use that when calling
3112		 * read_tree_block.
3113		 */
3114		if (btrfs_extent_refs(eb, ei) == 1 &&
3115		    !(btrfs_extent_flags(eb, ei) &
3116		      BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3117		    ptr < end) {
3118			struct btrfs_extent_inline_ref *iref;
3119			int type;
3120
3121			iref = (struct btrfs_extent_inline_ref *)ptr;
3122			type = btrfs_get_extent_inline_ref_type(eb, iref,
3123							BTRFS_REF_TYPE_BLOCK);
3124			if (type == BTRFS_REF_TYPE_INVALID)
3125				return -EINVAL;
3126			if (type == BTRFS_TREE_BLOCK_REF_KEY)
3127				owner = btrfs_extent_inline_ref_offset(eb, iref);
3128		}
3129	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3130		btrfs_print_v0_err(eb->fs_info);
3131		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3132		return -EINVAL;
3133	} else {
3134		BUG();
3135	}
3136
3137	btrfs_release_path(path);
3138
3139	BUG_ON(level == -1);
3140
3141	block = kmalloc(sizeof(*block), GFP_NOFS);
3142	if (!block)
3143		return -ENOMEM;
3144
3145	block->bytenr = extent_key->objectid;
3146	block->key.objectid = rc->extent_root->fs_info->nodesize;
3147	block->key.offset = generation;
3148	block->level = level;
3149	block->key_ready = 0;
3150	block->owner = owner;
3151
3152	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3153	if (rb_node)
3154		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3155				    -EEXIST);
3156
3157	return 0;
3158}
3159
3160/*
3161 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3162 */
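/*
 * On filesystems with the SKINNY_METADATA feature the extent item for this
 * block may still be in the old full format, so the search below is done in
 * two passes: first look for a METADATA_ITEM_KEY, and if that misses retry
 * with an EXTENT_ITEM_KEY whose offset must equal the block size.
 */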
3163static int __add_tree_block(struct reloc_control *rc,
3164			    u64 bytenr, u32 blocksize,
3165			    struct rb_root *blocks)
3166{
3167	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3168	struct btrfs_path *path;
3169	struct btrfs_key key;
3170	int ret;
3171	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3172
3173	if (tree_block_processed(bytenr, rc))
3174		return 0;
3175
3176	if (rb_simple_search(blocks, bytenr))
3177		return 0;
3178
3179	path = btrfs_alloc_path();
3180	if (!path)
3181		return -ENOMEM;
3182again:
3183	key.objectid = bytenr;
3184	if (skinny) {
3185		key.type = BTRFS_METADATA_ITEM_KEY;
3186		key.offset = (u64)-1;
3187	} else {
3188		key.type = BTRFS_EXTENT_ITEM_KEY;
3189		key.offset = blocksize;
3190	}
3191
3192	path->search_commit_root = 1;
3193	path->skip_locking = 1;
3194	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3195	if (ret < 0)
3196		goto out;
3197
3198	if (ret > 0 && skinny) {
3199		if (path->slots[0]) {
3200			path->slots[0]--;
3201			btrfs_item_key_to_cpu(path->nodes[0], &key,
3202					      path->slots[0]);
3203			if (key.objectid == bytenr &&
3204			    (key.type == BTRFS_METADATA_ITEM_KEY ||
3205			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
3206			      key.offset == blocksize)))
3207				ret = 0;
3208		}
3209
3210		if (ret) {
3211			skinny = false;
3212			btrfs_release_path(path);
3213			goto again;
3214		}
3215	}
3216	if (ret) {
3217		ASSERT(ret == 1);
3218		btrfs_print_leaf(path->nodes[0]);
3219		btrfs_err(fs_info,
3220	     "tree block extent item (%llu) is not found in extent tree",
3221		     bytenr);
3222		WARN_ON(1);
3223		ret = -EINVAL;
3224		goto out;
3225	}
3226
3227	ret = add_tree_block(rc, &key, path, blocks);
3228out:
3229	btrfs_free_path(path);
3230	return ret;
3231}
3232
3233static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3234				    struct btrfs_block_group *block_group,
3235				    struct inode *inode,
3236				    u64 ino)
3237{
3238	struct btrfs_root *root = fs_info->tree_root;
3239	struct btrfs_trans_handle *trans;
3240	int ret = 0;
3241
3242	if (inode)
3243		goto truncate;
3244
3245	inode = btrfs_iget(fs_info->sb, ino, root);
3246	if (IS_ERR(inode))
3247		return -ENOENT;
3248
3249truncate:
3250	ret = btrfs_check_trunc_cache_free_space(fs_info,
3251						 &fs_info->global_block_rsv);
3252	if (ret)
3253		goto out;
3254
3255	trans = btrfs_join_transaction(root);
3256	if (IS_ERR(trans)) {
3257		ret = PTR_ERR(trans);
3258		goto out;
3259	}
3260
3261	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3262
3263	btrfs_end_transaction(trans);
3264	btrfs_btree_balance_dirty(fs_info);
3265out:
3266	iput(inode);
3267	return ret;
3268}
3269
3270/*
3271 * Locate the free space cache EXTENT_DATA in a root tree leaf and delete
3272 * the cache inode, so its data extent does not block data relocation.
3273 */
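/*
 * Only leaves owned by the root tree can hold v1 free space cache inode
 * items.  A leaf qualifies when one of its regular or prealloc file extents
 * points at the data extent being relocated; that inode is then truncated
 * through delete_block_group_cache() so the extent can move.
 */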
3274static int delete_v1_space_cache(struct extent_buffer *leaf,
3275				 struct btrfs_block_group *block_group,
3276				 u64 data_bytenr)
3277{
3278	u64 space_cache_ino;
3279	struct btrfs_file_extent_item *ei;
3280	struct btrfs_key key;
3281	bool found = false;
3282	int i;
3283	int ret;
3284
3285	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3286		return 0;
3287
3288	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3289		u8 type;
3290
3291		btrfs_item_key_to_cpu(leaf, &key, i);
3292		if (key.type != BTRFS_EXTENT_DATA_KEY)
3293			continue;
3294		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3295		type = btrfs_file_extent_type(leaf, ei);
3296
3297		if ((type == BTRFS_FILE_EXTENT_REG ||
3298		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
3299		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3300			found = true;
3301			space_cache_ino = key.objectid;
3302			break;
3303		}
3304	}
3305	if (!found)
3306		return -ENOENT;
3307	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3308					space_cache_ino);
3309	return ret;
3310}
3311
3312/*
3313 * helper to find all tree blocks that reference a given data extent
3314 */
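/*
 * This walks the backrefs of the data extent with btrfs_find_all_leafs() to
 * collect every leaf referencing it, drops any v1 free space cache inode
 * found among those leaves, and queues each leaf via __add_tree_block() so
 * its data pointers can be rewritten during the UPDATE_DATA_PTRS stage.
 */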
3315static noinline_for_stack
3316int add_data_references(struct reloc_control *rc,
3317			struct btrfs_key *extent_key,
3318			struct btrfs_path *path,
3319			struct rb_root *blocks)
3320{
3321	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3322	struct ulist *leaves = NULL;
3323	struct ulist_iterator leaf_uiter;
3324	struct ulist_node *ref_node = NULL;
3325	const u32 blocksize = fs_info->nodesize;
3326	int ret = 0;
3327
3328	btrfs_release_path(path);
3329	ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3330				   0, &leaves, NULL, true);
3331	if (ret < 0)
3332		return ret;
3333
3334	ULIST_ITER_INIT(&leaf_uiter);
3335	while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3336		struct extent_buffer *eb;
3337
3338		eb = read_tree_block(fs_info, ref_node->val, 0, 0, 0, NULL);
3339		if (IS_ERR(eb)) {
3340			ret = PTR_ERR(eb);
3341			break;
3342		}
3343		ret = delete_v1_space_cache(eb, rc->block_group,
3344					    extent_key->objectid);
3345		free_extent_buffer(eb);
3346		if (ret < 0)
3347			break;
3348		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3349		if (ret < 0)
3350			break;
3351	}
3352	if (ret < 0)
3353		free_block_list(blocks);
3354	ulist_free(leaves);
3355	return ret;
3356}
3357
3358/*
3359 * helper to find next unprocessed extent
3360 */
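/*
 * Returns 0 with @extent_key filled in for the next EXTENT_ITEM or
 * METADATA_ITEM at or after rc->search_start, 1 once the end of the block
 * group is reached, or a negative errno.  Extents already recorded as
 * EXTENT_DIRTY in rc->processed_blocks are skipped and search_start is
 * advanced past them.
 */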
3361static noinline_for_stack
3362int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3363		     struct btrfs_key *extent_key)
3364{
3365	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3366	struct btrfs_key key;
3367	struct extent_buffer *leaf;
3368	u64 start, end, last;
3369	int ret;
3370
3371	last = rc->block_group->start + rc->block_group->length;
3372	while (1) {
3373		cond_resched();
3374		if (rc->search_start >= last) {
3375			ret = 1;
3376			break;
3377		}
3378
3379		key.objectid = rc->search_start;
3380		key.type = BTRFS_EXTENT_ITEM_KEY;
3381		key.offset = 0;
3382
3383		path->search_commit_root = 1;
3384		path->skip_locking = 1;
3385		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3386					0, 0);
3387		if (ret < 0)
3388			break;
3389next:
3390		leaf = path->nodes[0];
3391		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3392			ret = btrfs_next_leaf(rc->extent_root, path);
3393			if (ret != 0)
3394				break;
3395			leaf = path->nodes[0];
3396		}
3397
3398		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3399		if (key.objectid >= last) {
3400			ret = 1;
3401			break;
3402		}
3403
3404		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3405		    key.type != BTRFS_METADATA_ITEM_KEY) {
3406			path->slots[0]++;
3407			goto next;
3408		}
3409
3410		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3411		    key.objectid + key.offset <= rc->search_start) {
3412			path->slots[0]++;
3413			goto next;
3414		}
3415
3416		if (key.type == BTRFS_METADATA_ITEM_KEY &&
3417		    key.objectid + fs_info->nodesize <=
3418		    rc->search_start) {
3419			path->slots[0]++;
3420			goto next;
3421		}
3422
3423		ret = find_first_extent_bit(&rc->processed_blocks,
3424					    key.objectid, &start, &end,
3425					    EXTENT_DIRTY, NULL);
3426
3427		if (ret == 0 && start <= key.objectid) {
3428			btrfs_release_path(path);
3429			rc->search_start = end + 1;
3430		} else {
3431			if (key.type == BTRFS_EXTENT_ITEM_KEY)
3432				rc->search_start = key.objectid + key.offset;
3433			else
3434				rc->search_start = key.objectid +
3435					fs_info->nodesize;
3436			memcpy(extent_key, &key, sizeof(key));
3437			return 0;
3438		}
3439	}
3440	btrfs_release_path(path);
3441	return ret;
3442}
3443
3444static void set_reloc_control(struct reloc_control *rc)
3445{
3446	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3447
3448	mutex_lock(&fs_info->reloc_mutex);
3449	fs_info->reloc_ctl = rc;
3450	mutex_unlock(&fs_info->reloc_mutex);
3451}
3452
3453static void unset_reloc_control(struct reloc_control *rc)
3454{
3455	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3456
3457	mutex_lock(&fs_info->reloc_mutex);
3458	fs_info->reloc_ctl = NULL;
3459	mutex_unlock(&fs_info->reloc_mutex);
3460}
3461
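/*
 * The temporary block reservation set up below is sized for
 * RELOCATION_RESERVED_NODES tree blocks; with a (typical, not mandated)
 * 16KiB nodesize that is 256 * 16KiB = 4MiB.  After set_reloc_control()
 * publishes the reloc_control, the transaction join + commit presumably
 * ensures any transaction that started before that point has finished
 * before extents are processed.
 */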
3462static noinline_for_stack
3463int prepare_to_relocate(struct reloc_control *rc)
3464{
3465	struct btrfs_trans_handle *trans;
3466	int ret;
3467
3468	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3469					      BTRFS_BLOCK_RSV_TEMP);
3470	if (!rc->block_rsv)
3471		return -ENOMEM;
3472
3473	memset(&rc->cluster, 0, sizeof(rc->cluster));
3474	rc->search_start = rc->block_group->start;
3475	rc->extents_found = 0;
3476	rc->nodes_relocated = 0;
3477	rc->merging_rsv_size = 0;
3478	rc->reserved_bytes = 0;
3479	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3480			      RELOCATION_RESERVED_NODES;
3481	ret = btrfs_block_rsv_refill(rc->extent_root,
3482				     rc->block_rsv, rc->block_rsv->size,
3483				     BTRFS_RESERVE_FLUSH_ALL);
3484	if (ret)
3485		return ret;
3486
3487	rc->create_reloc_tree = 1;
3488	set_reloc_control(rc);
3489
3490	trans = btrfs_join_transaction(rc->extent_root);
3491	if (IS_ERR(trans)) {
3492		unset_reloc_control(rc);
3493		/*
3494		 * The extent tree is not a ref_cow tree and has no reloc_root to
3495		 * clean up.  Callers are responsible for freeing the block rsv
3496		 * reserved above.
3497		 */
3498		return PTR_ERR(trans);
3499	}
3500	return btrfs_commit_transaction(trans);
3501}
3502
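/*
 * Main loop: refill the block reservation, start a transaction, pick the
 * next extent with find_next_extent(), queue the referencing tree blocks
 * (add_tree_block() for metadata, add_data_references() for data in the
 * UPDATE_DATA_PTRS stage) and relocate them with relocate_tree_blocks().
 * Data extents in the MOVE_DATA_EXTENTS stage are batched into the cluster
 * via relocate_data_extent().  On ENOSPC after some progress a chunk
 * allocation is forced and the loop restarts.  prepare_to_merge() and
 * merge_reloc_roots() run even on error or cancellation.
 */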
3503static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3504{
3505	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3506	struct rb_root blocks = RB_ROOT;
3507	struct btrfs_key key;
3508	struct btrfs_trans_handle *trans = NULL;
3509	struct btrfs_path *path;
3510	struct btrfs_extent_item *ei;
3511	u64 flags;
3512	int ret;
3513	int err = 0;
3514	int progress = 0;
3515
3516	path = btrfs_alloc_path();
3517	if (!path)
3518		return -ENOMEM;
3519	path->reada = READA_FORWARD;
3520
3521	ret = prepare_to_relocate(rc);
3522	if (ret) {
3523		err = ret;
3524		goto out_free;
3525	}
3526
3527	while (1) {
3528		rc->reserved_bytes = 0;
3529		ret = btrfs_block_rsv_refill(rc->extent_root,
3530					rc->block_rsv, rc->block_rsv->size,
3531					BTRFS_RESERVE_FLUSH_ALL);
3532		if (ret) {
3533			err = ret;
3534			break;
3535		}
3536		progress++;
3537		trans = btrfs_start_transaction(rc->extent_root, 0);
3538		if (IS_ERR(trans)) {
3539			err = PTR_ERR(trans);
3540			trans = NULL;
3541			break;
3542		}
3543restart:
3544		if (update_backref_cache(trans, &rc->backref_cache)) {
3545			btrfs_end_transaction(trans);
3546			trans = NULL;
3547			continue;
3548		}
3549
3550		ret = find_next_extent(rc, path, &key);
3551		if (ret < 0)
3552			err = ret;
3553		if (ret != 0)
3554			break;
3555
3556		rc->extents_found++;
3557
3558		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3559				    struct btrfs_extent_item);
3560		flags = btrfs_extent_flags(path->nodes[0], ei);
3561
3562		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3563			ret = add_tree_block(rc, &key, path, &blocks);
3564		} else if (rc->stage == UPDATE_DATA_PTRS &&
3565			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
3566			ret = add_data_references(rc, &key, path, &blocks);
3567		} else {
3568			btrfs_release_path(path);
3569			ret = 0;
3570		}
3571		if (ret < 0) {
3572			err = ret;
3573			break;
3574		}
3575
3576		if (!RB_EMPTY_ROOT(&blocks)) {
3577			ret = relocate_tree_blocks(trans, rc, &blocks);
3578			if (ret < 0) {
3579				if (ret != -EAGAIN) {
3580					err = ret;
3581					break;
3582				}
3583				rc->extents_found--;
3584				rc->search_start = key.objectid;
3585			}
3586		}
3587
3588		btrfs_end_transaction_throttle(trans);
3589		btrfs_btree_balance_dirty(fs_info);
3590		trans = NULL;
3591
3592		if (rc->stage == MOVE_DATA_EXTENTS &&
3593		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3594			rc->found_file_extent = 1;
3595			ret = relocate_data_extent(rc->data_inode,
3596						   &key, &rc->cluster);
3597			if (ret < 0) {
3598				err = ret;
3599				break;
3600			}
3601		}
3602		if (btrfs_should_cancel_balance(fs_info)) {
3603			err = -ECANCELED;
3604			break;
3605		}
3606	}
3607	if (trans && progress && err == -ENOSPC) {
3608		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3609		if (ret == 1) {
3610			err = 0;
3611			progress = 0;
3612			goto restart;
3613		}
3614	}
3615
3616	btrfs_release_path(path);
3617	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3618
3619	if (trans) {
3620		btrfs_end_transaction_throttle(trans);
3621		btrfs_btree_balance_dirty(fs_info);
3622	}
3623
3624	if (!err) {
3625		ret = relocate_file_extent_cluster(rc->data_inode,
3626						   &rc->cluster);
3627		if (ret < 0)
3628			err = ret;
3629	}
3630
3631	rc->create_reloc_tree = 0;
3632	set_reloc_control(rc);
3633
3634	btrfs_backref_release_cache(&rc->backref_cache);
3635	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3636
3637	/*
3638	 * Even in the case when the relocation is cancelled, we should still go
3639	 * through prepare_to_merge() and merge_reloc_roots().
3640	 *
3641	 * For error (including cancelled balance), prepare_to_merge() will
3642	 * mark all reloc trees orphan, then queue them for cleanup in
3643	 * merge_reloc_roots()
3644	 */
3645	err = prepare_to_merge(rc, err);
3646
3647	merge_reloc_roots(rc);
3648
3649	rc->merge_reloc_tree = 0;
3650	unset_reloc_control(rc);
3651	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3652
3653	/* get rid of pinned extents */
3654	trans = btrfs_join_transaction(rc->extent_root);
3655	if (IS_ERR(trans)) {
3656		err = PTR_ERR(trans);
3657		goto out_free;
3658	}
3659	ret = btrfs_commit_transaction(trans);
3660	if (ret && !err)
3661		err = ret;
3662out_free:
3663	ret = clean_dirty_subvols(rc);
3664	if (ret < 0 && !err)
3665		err = ret;
3666	btrfs_free_block_rsv(fs_info, rc->block_rsv);
3667	btrfs_free_path(path);
3668	return err;
3669}
3670
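/*
 * The data reloc inode created below is a bare regular file (S_IFREG | 0600)
 * flagged NOCOMPRESS and, except on zoned filesystems, PREALLOC; zoned mode
 * drops PREALLOC since preallocated extents are not usable with
 * sequential-only zones.
 */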
3671static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3672				 struct btrfs_root *root, u64 objectid)
3673{
3674	struct btrfs_path *path;
3675	struct btrfs_inode_item *item;
3676	struct extent_buffer *leaf;
3677	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
3678	int ret;
3679
3680	if (btrfs_is_zoned(trans->fs_info))
3681		flags &= ~BTRFS_INODE_PREALLOC;
3682
3683	path = btrfs_alloc_path();
3684	if (!path)
3685		return -ENOMEM;
3686
3687	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3688	if (ret)
3689		goto out;
3690
3691	leaf = path->nodes[0];
3692	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3693	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3694	btrfs_set_inode_generation(leaf, item, 1);
3695	btrfs_set_inode_size(leaf, item, 0);
3696	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3697	btrfs_set_inode_flags(leaf, item, flags);
3698	btrfs_mark_buffer_dirty(leaf);
3699out:
3700	btrfs_free_path(path);
3701	return ret;
3702}
3703
3704static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3705				struct btrfs_root *root, u64 objectid)
3706{
3707	struct btrfs_path *path;
3708	struct btrfs_key key;
3709	int ret = 0;
3710
3711	path = btrfs_alloc_path();
3712	if (!path) {
3713		ret = -ENOMEM;
3714		goto out;
3715	}
3716
3717	key.objectid = objectid;
3718	key.type = BTRFS_INODE_ITEM_KEY;
3719	key.offset = 0;
3720	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3721	if (ret) {
3722		if (ret > 0)
3723			ret = -ENOENT;
3724		goto out;
3725	}
3726	ret = btrfs_del_item(trans, root, path);
3727out:
3728	if (ret)
3729		btrfs_abort_transaction(trans, ret);
3730	btrfs_free_path(path);
3731}
3732
3733/*
3734 * helper to create inode for data relocation.
3735 * the inode is in the data relocation tree and its link count is 0.
3736 */
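/*
 * Note that index_cnt is reused to remember the start of the block group:
 * file offset X in this inode corresponds to the old disk bytenr
 * X + index_cnt, which is what relocate_file_extent_cluster() and
 * btrfs_reloc_clone_csums() rely on.
 */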
3737static noinline_for_stack
3738struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3739				 struct btrfs_block_group *group)
3740{
3741	struct inode *inode = NULL;
3742	struct btrfs_trans_handle *trans;
3743	struct btrfs_root *root;
3744	u64 objectid;
3745	int err = 0;
3746
3747	root = btrfs_grab_root(fs_info->data_reloc_root);
3748	trans = btrfs_start_transaction(root, 6);
3749	if (IS_ERR(trans)) {
3750		btrfs_put_root(root);
3751		return ERR_CAST(trans);
3752	}
3753
3754	err = btrfs_get_free_objectid(root, &objectid);
3755	if (err)
3756		goto out;
3757
3758	err = __insert_orphan_inode(trans, root, objectid);
3759	if (err)
3760		goto out;
3761
3762	inode = btrfs_iget(fs_info->sb, objectid, root);
3763	if (IS_ERR(inode)) {
3764		delete_orphan_inode(trans, root, objectid);
3765		err = PTR_ERR(inode);
3766		inode = NULL;
3767		goto out;
3768	}
3769	BTRFS_I(inode)->index_cnt = group->start;
3770
3771	err = btrfs_orphan_add(trans, BTRFS_I(inode));
3772out:
3773	btrfs_put_root(root);
3774	btrfs_end_transaction(trans);
3775	btrfs_btree_balance_dirty(fs_info);
3776	if (err) {
3777		if (inode)
3778			iput(inode);
3779		inode = ERR_PTR(err);
3780	}
3781	return inode;
3782}
3783
3784/*
3785 * Mark start of chunk relocation that is cancellable. Check if the cancellation
3786 * has been requested meanwhile and don't start in that case.
3787 *
3788 * Return:
3789 *   0             success
3790 *   -EINPROGRESS  operation is already in progress, that's probably a bug
3791 *   -ECANCELED    cancellation request was set before the operation started
3792 *   -EAGAIN       can not start because there are ongoing send operations
3793 */
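/*
 * Callers pair this with reloc_chunk_end() once cleanup is done; see
 * btrfs_relocate_block_group() and btrfs_recover_relocation() below.
 */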
3794static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3795{
3796	spin_lock(&fs_info->send_reloc_lock);
3797	if (fs_info->send_in_progress) {
3798		btrfs_warn_rl(fs_info,
3799"cannot run relocation while send operations are in progress (%d in progress)",
3800			      fs_info->send_in_progress);
3801		spin_unlock(&fs_info->send_reloc_lock);
3802		return -EAGAIN;
3803	}
3804	if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3805		/* This should not happen */
3806		spin_unlock(&fs_info->send_reloc_lock);
3807		btrfs_err(fs_info, "reloc already running, cannot start");
3808		return -EINPROGRESS;
3809	}
3810	spin_unlock(&fs_info->send_reloc_lock);
3811
3812	if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3813		btrfs_info(fs_info, "chunk relocation canceled on start");
3814		/*
3815		 * On cancel, clear all requests but let the caller mark
3816		 * the end after cleanup operations.
3817		 */
3818		atomic_set(&fs_info->reloc_cancel_req, 0);
3819		return -ECANCELED;
3820	}
3821	return 0;
3822}
3823
3824/*
3825 * Mark end of chunk relocation that is cancellable and wake any waiters.
3826 */
3827static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3828{
3829	/* Requested after start, clear bit first so any waiters can continue */
3830	if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3831		btrfs_info(fs_info, "chunk relocation canceled during operation");
3832	spin_lock(&fs_info->send_reloc_lock);
3833	clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3834	spin_unlock(&fs_info->send_reloc_lock);
3835	atomic_set(&fs_info->reloc_cancel_req, 0);
3836}
3837
3838static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3839{
3840	struct reloc_control *rc;
3841
3842	rc = kzalloc(sizeof(*rc), GFP_NOFS);
3843	if (!rc)
3844		return NULL;
3845
3846	INIT_LIST_HEAD(&rc->reloc_roots);
3847	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3848	btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3849	mapping_tree_init(&rc->reloc_root_tree);
3850	extent_io_tree_init(fs_info, &rc->processed_blocks,
3851			    IO_TREE_RELOC_BLOCKS, NULL);
3852	return rc;
3853}
3854
3855static void free_reloc_control(struct reloc_control *rc)
3856{
3857	struct mapping_node *node, *tmp;
3858
3859	free_reloc_roots(&rc->reloc_roots);
3860	rbtree_postorder_for_each_entry_safe(node, tmp,
3861			&rc->reloc_root_tree.rb_root, rb_node)
3862		kfree(node);
3863
3864	kfree(rc);
3865}
3866
3867/*
3868 * Print the block group being relocated
3869 */
3870static void describe_relocation(struct btrfs_fs_info *fs_info,
3871				struct btrfs_block_group *block_group)
3872{
3873	char buf[128] = {'\0'};
3874
3875	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3876
3877	btrfs_info(fs_info,
3878		   "relocating block group %llu flags %s",
3879		   block_group->start, buf);
3880}
3881
3882static const char *stage_to_string(int stage)
3883{
3884	if (stage == MOVE_DATA_EXTENTS)
3885		return "move data extents";
3886	if (stage == UPDATE_DATA_PTRS)
3887		return "update data pointers";
3888	return "unknown";
3889}
3890
3891/*
3892 * function to relocate all extents in a block group.
3893 */
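/*
 * Overall flow: mark the block group read-only, drop its free space cache
 * inode, create the data reloc inode, wait for reservations, nocow writers
 * and ordered extents, then run relocate_block_group() under cleaner_mutex
 * in a loop.  After the MOVE_DATA_EXTENTS pass the dirtied data is flushed
 * and the stage switches to UPDATE_DATA_PTRS; the loop ends once no extents
 * are found.  On success the group should be empty (see the WARN_ONs).
 */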
3894int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3895{
3896	struct btrfs_block_group *bg;
3897	struct btrfs_root *extent_root = fs_info->extent_root;
3898	struct reloc_control *rc;
3899	struct inode *inode;
3900	struct btrfs_path *path;
3901	int ret;
3902	int rw = 0;
3903	int err = 0;
3904
3905	bg = btrfs_lookup_block_group(fs_info, group_start);
3906	if (!bg)
3907		return -ENOENT;
3908
3909	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
3910		btrfs_put_block_group(bg);
3911		return -ETXTBSY;
3912	}
3913
3914	rc = alloc_reloc_control(fs_info);
3915	if (!rc) {
3916		btrfs_put_block_group(bg);
3917		return -ENOMEM;
3918	}
3919
3920	ret = reloc_chunk_start(fs_info);
3921	if (ret < 0) {
3922		err = ret;
3923		goto out_put_bg;
3924	}
3925
3926	rc->extent_root = extent_root;
3927	rc->block_group = bg;
3928
3929	ret = btrfs_inc_block_group_ro(rc->block_group, true);
3930	if (ret) {
3931		err = ret;
3932		goto out;
3933	}
3934	rw = 1;
3935
3936	path = btrfs_alloc_path();
3937	if (!path) {
3938		err = -ENOMEM;
3939		goto out;
3940	}
3941
3942	inode = lookup_free_space_inode(rc->block_group, path);
3943	btrfs_free_path(path);
3944
3945	if (!IS_ERR(inode))
3946		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
3947	else
3948		ret = PTR_ERR(inode);
3949
3950	if (ret && ret != -ENOENT) {
3951		err = ret;
3952		goto out;
3953	}
3954
3955	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
3956	if (IS_ERR(rc->data_inode)) {
3957		err = PTR_ERR(rc->data_inode);
3958		rc->data_inode = NULL;
3959		goto out;
3960	}
3961
3962	describe_relocation(fs_info, rc->block_group);
3963
3964	btrfs_wait_block_group_reservations(rc->block_group);
3965	btrfs_wait_nocow_writers(rc->block_group);
3966	btrfs_wait_ordered_roots(fs_info, U64_MAX,
3967				 rc->block_group->start,
3968				 rc->block_group->length);
3969
3970	while (1) {
3971		int finishes_stage;
3972
3973		mutex_lock(&fs_info->cleaner_mutex);
3974		ret = relocate_block_group(rc);
3975		mutex_unlock(&fs_info->cleaner_mutex);
3976		if (ret < 0)
3977			err = ret;
3978
3979		finishes_stage = rc->stage;
3980		/*
3981		 * We may have gotten ENOSPC after we already dirtied some
3982		 * extents.  If writeout happens while we're relocating a
3983		 * different block group we could end up hitting the
3984		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
3985		 * btrfs_reloc_cow_block.  Make sure we write everything out
3986		 * properly so we don't trip over this problem, and then break
3987		 * out of the loop if we hit an error.
3988		 */
3989		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
3990			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
3991						       (u64)-1);
3992			if (ret)
3993				err = ret;
3994			invalidate_mapping_pages(rc->data_inode->i_mapping,
3995						 0, -1);
3996			rc->stage = UPDATE_DATA_PTRS;
3997		}
3998
3999		if (err < 0)
4000			goto out;
4001
4002		if (rc->extents_found == 0)
4003			break;
4004
4005		btrfs_info(fs_info, "found %llu extents, stage: %s",
4006			   rc->extents_found, stage_to_string(finishes_stage));
4007	}
4008
4009	WARN_ON(rc->block_group->pinned > 0);
4010	WARN_ON(rc->block_group->reserved > 0);
4011	WARN_ON(rc->block_group->used > 0);
4012out:
4013	if (err && rw)
4014		btrfs_dec_block_group_ro(rc->block_group);
4015	iput(rc->data_inode);
4016out_put_bg:
4017	btrfs_put_block_group(bg);
4018	reloc_chunk_end(fs_info);
4019	free_reloc_control(rc);
4020	return err;
4021}
4022
4023static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4024{
4025	struct btrfs_fs_info *fs_info = root->fs_info;
4026	struct btrfs_trans_handle *trans;
4027	int ret, err;
4028
4029	trans = btrfs_start_transaction(fs_info->tree_root, 0);
4030	if (IS_ERR(trans))
4031		return PTR_ERR(trans);
4032
4033	memset(&root->root_item.drop_progress, 0,
4034		sizeof(root->root_item.drop_progress));
4035	btrfs_set_root_drop_level(&root->root_item, 0);
4036	btrfs_set_root_refs(&root->root_item, 0);
4037	ret = btrfs_update_root(trans, fs_info->tree_root,
4038				&root->root_key, &root->root_item);
4039
4040	err = btrfs_end_transaction(trans);
4041	if (err)
4042		return err;
4043	return ret;
4044}
4045
4046/*
4047 * recover relocation interrupted by system crash.
4048 *
4049 * this function resumes merging reloc trees with corresponding fs trees.
4050 * this is important for keeping the sharing of tree blocks
4051 */
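/*
 * The root tree is scanned from the highest key downwards for
 * BTRFS_TREE_RELOC_OBJECTID root items.  A reloc root whose fs root no
 * longer exists is marked as garbage (refs set to 0) so it gets deleted;
 * the remaining reloc roots are re-attached to their fs roots and
 * merge_reloc_roots() resumes the interrupted merge.  Finally orphan
 * cleanup runs on the data relocation tree.
 */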
4052int btrfs_recover_relocation(struct btrfs_root *root)
4053{
4054	struct btrfs_fs_info *fs_info = root->fs_info;
4055	LIST_HEAD(reloc_roots);
4056	struct btrfs_key key;
4057	struct btrfs_root *fs_root;
4058	struct btrfs_root *reloc_root;
4059	struct btrfs_path *path;
4060	struct extent_buffer *leaf;
4061	struct reloc_control *rc = NULL;
4062	struct btrfs_trans_handle *trans;
4063	int ret;
4064	int err = 0;
4065
4066	path = btrfs_alloc_path();
4067	if (!path)
4068		return -ENOMEM;
4069	path->reada = READA_BACK;
4070
4071	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4072	key.type = BTRFS_ROOT_ITEM_KEY;
4073	key.offset = (u64)-1;
4074
4075	while (1) {
4076		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4077					path, 0, 0);
4078		if (ret < 0) {
4079			err = ret;
4080			goto out;
4081		}
4082		if (ret > 0) {
4083			if (path->slots[0] == 0)
4084				break;
4085			path->slots[0]--;
4086		}
4087		leaf = path->nodes[0];
4088		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4089		btrfs_release_path(path);
4090
4091		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4092		    key.type != BTRFS_ROOT_ITEM_KEY)
4093			break;
4094
4095		reloc_root = btrfs_read_tree_root(root, &key);
4096		if (IS_ERR(reloc_root)) {
4097			err = PTR_ERR(reloc_root);
4098			goto out;
4099		}
4100
4101		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
4102		list_add(&reloc_root->root_list, &reloc_roots);
4103
4104		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4105			fs_root = btrfs_get_fs_root(fs_info,
4106					reloc_root->root_key.offset, false);
4107			if (IS_ERR(fs_root)) {
4108				ret = PTR_ERR(fs_root);
4109				if (ret != -ENOENT) {
4110					err = ret;
4111					goto out;
4112				}
4113				ret = mark_garbage_root(reloc_root);
4114				if (ret < 0) {
4115					err = ret;
4116					goto out;
4117				}
4118			} else {
4119				btrfs_put_root(fs_root);
4120			}
4121		}
4122
4123		if (key.offset == 0)
4124			break;
4125
4126		key.offset--;
4127	}
4128	btrfs_release_path(path);
4129
4130	if (list_empty(&reloc_roots))
4131		goto out;
4132
4133	rc = alloc_reloc_control(fs_info);
4134	if (!rc) {
4135		err = -ENOMEM;
4136		goto out;
4137	}
4138
4139	ret = reloc_chunk_start(fs_info);
4140	if (ret < 0) {
4141		err = ret;
4142		goto out_end;
4143	}
4144
4145	rc->extent_root = fs_info->extent_root;
4146
4147	set_reloc_control(rc);
4148
4149	trans = btrfs_join_transaction(rc->extent_root);
4150	if (IS_ERR(trans)) {
4151		err = PTR_ERR(trans);
4152		goto out_unset;
4153	}
4154
4155	rc->merge_reloc_tree = 1;
4156
4157	while (!list_empty(&reloc_roots)) {
4158		reloc_root = list_entry(reloc_roots.next,
4159					struct btrfs_root, root_list);
4160		list_del(&reloc_root->root_list);
4161
4162		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4163			list_add_tail(&reloc_root->root_list,
4164				      &rc->reloc_roots);
4165			continue;
4166		}
4167
4168		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
4169					    false);
4170		if (IS_ERR(fs_root)) {
4171			err = PTR_ERR(fs_root);
4172			list_add_tail(&reloc_root->root_list, &reloc_roots);
4173			btrfs_end_transaction(trans);
4174			goto out_unset;
4175		}
4176
4177		err = __add_reloc_root(reloc_root);
4178		ASSERT(err != -EEXIST);
4179		if (err) {
4180			list_add_tail(&reloc_root->root_list, &reloc_roots);
4181			btrfs_put_root(fs_root);
4182			btrfs_end_transaction(trans);
4183			goto out_unset;
4184		}
4185		fs_root->reloc_root = btrfs_grab_root(reloc_root);
4186		btrfs_put_root(fs_root);
4187	}
4188
4189	err = btrfs_commit_transaction(trans);
4190	if (err)
4191		goto out_unset;
4192
4193	merge_reloc_roots(rc);
4194
4195	unset_reloc_control(rc);
4196
4197	trans = btrfs_join_transaction(rc->extent_root);
4198	if (IS_ERR(trans)) {
4199		err = PTR_ERR(trans);
4200		goto out_clean;
4201	}
4202	err = btrfs_commit_transaction(trans);
4203out_clean:
4204	ret = clean_dirty_subvols(rc);
4205	if (ret < 0 && !err)
4206		err = ret;
4207out_unset:
4208	unset_reloc_control(rc);
4209out_end:
4210	reloc_chunk_end(fs_info);
4211	free_reloc_control(rc);
4212out:
4213	free_reloc_roots(&reloc_roots);
4214
4215	btrfs_free_path(path);
4216
4217	if (err == 0) {
4218		/* cleanup orphan inode in data relocation tree */
4219		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
4220		ASSERT(fs_root);
4221		err = btrfs_orphan_cleanup(fs_root);
4222		btrfs_put_root(fs_root);
4223	}
4224	return err;
4225}
4226
4227/*
4228 * helper to add ordered checksum for data relocation.
4229 *
4230 * cloning the checksums properly handles the nodatasum extents.
4231 * it also saves the CPU time of re-calculating the checksums.
4232 */
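/*
 * Example of the csum rebasing done below (illustrative numbers): if the
 * old data started at disk bytenr 1M and a csum entry covered bytes
 * starting at 1M + 4K, and the relocated ordered extent was written at
 * disk bytenr 2M, the entry is moved to 2M + (1M + 4K) - 1M = 2M + 4K,
 * i.e. it keeps the same offset inside the extent.
 */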
4233int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
4234{
4235	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4236	struct btrfs_ordered_sum *sums;
4237	struct btrfs_ordered_extent *ordered;
4238	int ret;
4239	u64 disk_bytenr;
4240	u64 new_bytenr;
4241	LIST_HEAD(list);
4242
4243	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4244	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
4245
4246	disk_bytenr = file_pos + inode->index_cnt;
4247	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
4248				       disk_bytenr + len - 1, &list, 0);
4249	if (ret)
4250		goto out;
4251
4252	while (!list_empty(&list)) {
4253		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4254		list_del_init(&sums->list);
4255
4256		/*
4257		 * We need to offset the new_bytenr based on where the csum is.
4258		 * We need to do this because we will read in entire prealloc
4259		 * extents but we may have written to, say, the middle of the
4260		 * prealloc extent, so we need to make sure the csum goes with
4261		 * the right disk offset.
4262		 *
4263		 * We can do this because the data reloc inode refers strictly
4264		 * to the on disk bytes, so we don't have to worry about
4265		 * disk_len vs real len like with real inodes since it's all
4266		 * disk length.
4267		 */
4268		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
4269		sums->bytenr = new_bytenr;
4270
4271		btrfs_add_ordered_sum(ordered, sums);
4272	}
4273out:
4274	btrfs_put_ordered_extent(ordered);
4275	return ret;
4276}
4277
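/*
 * Hooked into the generic COW path while relocation is running: when a
 * block of a reloc tree is COWed, the matching backref cache node is
 * re-pointed at the new buffer and queued as pending; on the first COW the
 * block is marked processed and, for non-leaf nodes, its size is added to
 * nodes_relocated.  Leaves COWed for the first time during the
 * UPDATE_DATA_PTRS stage get their file extent disk bytenrs rewritten
 * through replace_file_extents().
 */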
4278int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4279			  struct btrfs_root *root, struct extent_buffer *buf,
4280			  struct extent_buffer *cow)
4281{
4282	struct btrfs_fs_info *fs_info = root->fs_info;
4283	struct reloc_control *rc;
4284	struct btrfs_backref_node *node;
4285	int first_cow = 0;
4286	int level;
4287	int ret = 0;
4288
4289	rc = fs_info->reloc_ctl;
4290	if (!rc)
4291		return 0;
4292
4293	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4294	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4295
4296	level = btrfs_header_level(buf);
4297	if (btrfs_header_generation(buf) <=
4298	    btrfs_root_last_snapshot(&root->root_item))
4299		first_cow = 1;
4300
4301	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4302	    rc->create_reloc_tree) {
4303		WARN_ON(!first_cow && level == 0);
4304
4305		node = rc->backref_cache.path[level];
4306		BUG_ON(node->bytenr != buf->start &&
4307		       node->new_bytenr != buf->start);
4308
4309		btrfs_backref_drop_node_buffer(node);
4310		atomic_inc(&cow->refs);
4311		node->eb = cow;
4312		node->new_bytenr = cow->start;
4313
4314		if (!node->pending) {
4315			list_move_tail(&node->list,
4316				       &rc->backref_cache.pending[level]);
4317			node->pending = 1;
4318		}
4319
4320		if (first_cow)
4321			mark_block_processed(rc, node);
4322
4323		if (first_cow && level > 0)
4324			rc->nodes_relocated += buf->len;
4325	}
4326
4327	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4328		ret = replace_file_extents(trans, rc, root, cow);
4329	return ret;
4330}
4331
4332/*
4333 * called before creating a snapshot. it calculates the metadata
4334 * reservation required for relocating tree blocks in the snapshot.
4335 */
4336void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4337			      u64 *bytes_to_reserve)
4338{
4339	struct btrfs_root *root = pending->root;
4340	struct reloc_control *rc = root->fs_info->reloc_ctl;
4341
4342	if (!rc || !have_reloc_root(root))
4343		return;
4344
4345	if (!rc->merge_reloc_tree)
4346		return;
4347
4348	root = root->reloc_root;
4349	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4350	/*
4351	 * relocation is in the stage of merging trees. the space
4352	 * used by merging a reloc tree is twice the size of
4353	 * relocated tree nodes in the worst case. half for cowing
4354	 * the reloc tree, half for cowing the fs tree. the space
4355	 * used by cowing the reloc tree will be freed after the
4356	 * tree is dropped. if we create a snapshot, cowing the fs
4357	 * tree may use more space than it frees. so we need to
4358	 * reserve extra space.
4359	 */
4360	*bytes_to_reserve += rc->nodes_relocated;
4361}
4362
4363/*
4364 * called after snapshot is created. migrate block reservation
4365 * and create reloc root for the newly created snapshot
4366 *
4367 * This is similar to btrfs_init_reloc_root(), we come out of here with two
4368 * references held on the reloc_root, one for root->reloc_root and one for
4369 * rc->reloc_roots.
4370 */
4371int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4372			       struct btrfs_pending_snapshot *pending)
4373{
4374	struct btrfs_root *root = pending->root;
4375	struct btrfs_root *reloc_root;
4376	struct btrfs_root *new_root;
4377	struct reloc_control *rc = root->fs_info->reloc_ctl;
4378	int ret;
4379
4380	if (!rc || !have_reloc_root(root))
4381		return 0;
4382
4383	rc = root->fs_info->reloc_ctl;
4384	rc->merging_rsv_size += rc->nodes_relocated;
4385
4386	if (rc->merge_reloc_tree) {
4387		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4388					      rc->block_rsv,
4389					      rc->nodes_relocated, true);
4390		if (ret)
4391			return ret;
4392	}
4393
4394	new_root = pending->snap;
4395	reloc_root = create_reloc_root(trans, root->reloc_root,
4396				       new_root->root_key.objectid);
4397	if (IS_ERR(reloc_root))
4398		return PTR_ERR(reloc_root);
4399
4400	ret = __add_reloc_root(reloc_root);
4401	ASSERT(ret != -EEXIST);
4402	if (ret) {
4403		/* Pairs with create_reloc_root */
4404		btrfs_put_root(reloc_root);
4405		return ret;
4406	}
4407	new_root->reloc_root = btrfs_grab_root(reloc_root);
4408
4409	if (rc->create_reloc_tree)
4410		ret = clone_backref_node(trans, rc, root, reloc_root);
4411	return ret;
4412}