   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/mm.h>
   7#include <linux/rbtree.h>
   8#include <trace/events/btrfs.h>
   9#include "ctree.h"
  10#include "disk-io.h"
  11#include "backref.h"
  12#include "ulist.h"
  13#include "transaction.h"
  14#include "delayed-ref.h"
  15#include "locking.h"
  16#include "misc.h"
  17#include "tree-mod-log.h"
  18#include "fs.h"
  19#include "accessors.h"
  20#include "extent-tree.h"
  21#include "relocation.h"
  22#include "tree-checker.h"
  23
  24/* Just arbitrary numbers so we can be sure one of these happened. */
  25#define BACKREF_FOUND_SHARED     6
  26#define BACKREF_FOUND_NOT_SHARED 7
  27
  28struct extent_inode_elem {
  29	u64 inum;
  30	u64 offset;
  31	u64 num_bytes;
  32	struct extent_inode_elem *next;
  33};
  34
  35static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
  36			      const struct btrfs_key *key,
  37			      const struct extent_buffer *eb,
  38			      const struct btrfs_file_extent_item *fi,
  39			      struct extent_inode_elem **eie)
  40{
  41	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
  42	u64 offset = key->offset;
  43	struct extent_inode_elem *e;
  44	const u64 *root_ids;
  45	int root_count;
  46	bool cached;
  47
  48	if (!btrfs_file_extent_compression(eb, fi) &&
  49	    !btrfs_file_extent_encryption(eb, fi) &&
  50	    !btrfs_file_extent_other_encoding(eb, fi)) {
  51		u64 data_offset;
  52
  53		data_offset = btrfs_file_extent_offset(eb, fi);
  54
  55		if (ctx->extent_item_pos < data_offset ||
  56		    ctx->extent_item_pos >= data_offset + data_len)
  57			return 1;
  58		offset += ctx->extent_item_pos - data_offset;
  59	}
  60
  61	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
  62		goto add_inode_elem;
  63
  64	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
  65				   &root_count);
  66	if (!cached)
  67		goto add_inode_elem;
  68
  69	for (int i = 0; i < root_count; i++) {
  70		int ret;
  71
  72		ret = ctx->indirect_ref_iterator(key->objectid, offset,
  73						 data_len, root_ids[i],
  74						 ctx->user_ctx);
  75		if (ret)
  76			return ret;
  77	}
  78
  79add_inode_elem:
  80	e = kmalloc(sizeof(*e), GFP_NOFS);
  81	if (!e)
  82		return -ENOMEM;
  83
  84	e->next = *eie;
  85	e->inum = key->objectid;
  86	e->offset = offset;
  87	e->num_bytes = data_len;
  88	*eie = e;
  89
  90	return 0;
  91}
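/*
 * Worked example for the offset math above (illustrative numbers, not from
 * the original source): for an uncompressed, unencrypted extent with
 * data_offset = 8192, data_len = 65536 and key->offset = 0, a
 * ctx->extent_item_pos of 12288 falls inside [8192, 73728) and yields a
 * file offset of 0 + (12288 - 8192) = 4096.
 */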
  92
  93static void free_inode_elem_list(struct extent_inode_elem *eie)
  94{
  95	struct extent_inode_elem *eie_next;
  96
  97	for (; eie; eie = eie_next) {
  98		eie_next = eie->next;
  99		kfree(eie);
 100	}
 101}
 102
 103static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
 104			     const struct extent_buffer *eb,
 105			     struct extent_inode_elem **eie)
 106{
 107	u64 disk_byte;
 108	struct btrfs_key key;
 109	struct btrfs_file_extent_item *fi;
 110	int slot;
 111	int nritems;
 112	int extent_type;
 113	int ret;
 114
 115	/*
  116	 * From the shared data ref, we only have the leaf but we need
  117	 * the key. Thus, we must look at all items and check that we
  118	 * find one (or more) with a reference to our extent item.
 119	 */
 120	nritems = btrfs_header_nritems(eb);
 121	for (slot = 0; slot < nritems; ++slot) {
 122		btrfs_item_key_to_cpu(eb, &key, slot);
 123		if (key.type != BTRFS_EXTENT_DATA_KEY)
 124			continue;
 125		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 126		extent_type = btrfs_file_extent_type(eb, fi);
 127		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 128			continue;
 129		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 130		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 131		if (disk_byte != ctx->bytenr)
 132			continue;
 133
 134		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
 135		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 136			return ret;
 137	}
 138
 139	return 0;
 140}
 141
 142struct preftree {
 143	struct rb_root_cached root;
 144	unsigned int count;
 145};
 146
 147#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 148
 149struct preftrees {
 150	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 151	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 152	struct preftree indirect_missing_keys;
 153};
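/*
 * Usage sketch (this mirrors what find_parent_nodes() below actually does):
 * the three trees are initialized empty on the stack and released together
 * once the walk is done.
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 *	...
 *	prelim_release(&preftrees.direct);
 *	prelim_release(&preftrees.indirect);
 *	prelim_release(&preftrees.indirect_missing_keys);
 */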
 154
 155/*
 156 * Checks for a shared extent during backref search.
 157 *
 158 * The share_count tracks prelim_refs (direct and indirect) having a
 159 * ref->count >0:
 160 *  - incremented when a ref->count transitions to >0
 161 *  - decremented when a ref->count transitions to <1
 162 */
 163struct share_check {
 164	struct btrfs_backref_share_check_ctx *ctx;
 165	struct btrfs_root *root;
 166	u64 inum;
 167	u64 data_bytenr;
 168	u64 data_extent_gen;
 169	/*
 170	 * Counts number of inodes that refer to an extent (different inodes in
 171	 * the same root or different roots) that we could find. The sharedness
 172	 * check typically stops once this counter gets greater than 1, so it
 173	 * may not reflect the total number of inodes.
 174	 */
 175	int share_count;
 176	/*
  177	 * The number of times we found our inode referring to the data extent
  178	 * whose sharedness we are determining. In other words, how many file
  179	 * extent items we could find for our inode that point to our target
  180	 * data extent. The value we get here after finishing the extent
  181	 * sharedness check may be smaller than reality, but if it ends up
  182	 * being greater than 1, then we know for sure the inode has multiple
  183	 * file extent items that point to our data extent, and we can safely
  184	 * assume it's useful to cache the sharedness check result.
 185	 */
 186	int self_ref_count;
 187	bool have_delayed_delete_refs;
 188};
 189
 190static inline int extent_is_shared(struct share_check *sc)
 191{
 192	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 193}
 194
 195static struct kmem_cache *btrfs_prelim_ref_cache;
 196
 197int __init btrfs_prelim_ref_init(void)
 198{
 199	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 200					sizeof(struct prelim_ref),
 201					0,
 202					SLAB_MEM_SPREAD,
 203					NULL);
 204	if (!btrfs_prelim_ref_cache)
 205		return -ENOMEM;
 206	return 0;
 207}
 208
 209void __cold btrfs_prelim_ref_exit(void)
 210{
 211	kmem_cache_destroy(btrfs_prelim_ref_cache);
 212}
 213
 214static void free_pref(struct prelim_ref *ref)
 215{
 216	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 217}
 218
 219/*
 220 * Return 0 when both refs are for the same block (and can be merged).
 221 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 222 * indicates a 'higher' block.
 223 */
 224static int prelim_ref_compare(struct prelim_ref *ref1,
 225			      struct prelim_ref *ref2)
 226{
 227	if (ref1->level < ref2->level)
 228		return -1;
 229	if (ref1->level > ref2->level)
 230		return 1;
 231	if (ref1->root_id < ref2->root_id)
 232		return -1;
 233	if (ref1->root_id > ref2->root_id)
 234		return 1;
 235	if (ref1->key_for_search.type < ref2->key_for_search.type)
 236		return -1;
 237	if (ref1->key_for_search.type > ref2->key_for_search.type)
 238		return 1;
 239	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 240		return -1;
 241	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 242		return 1;
 243	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 244		return -1;
 245	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 246		return 1;
 247	if (ref1->parent < ref2->parent)
 248		return -1;
 249	if (ref1->parent > ref2->parent)
 250		return 1;
 251
 252	return 0;
 253}
 254
 255static void update_share_count(struct share_check *sc, int oldcount,
 256			       int newcount, struct prelim_ref *newref)
 257{
 258	if ((!sc) || (oldcount == 0 && newcount < 1))
 259		return;
 260
 261	if (oldcount > 0 && newcount < 1)
 262		sc->share_count--;
 263	else if (oldcount < 1 && newcount > 0)
 264		sc->share_count++;
 265
 266	if (newref->root_id == sc->root->root_key.objectid &&
 267	    newref->wanted_disk_byte == sc->data_bytenr &&
 268	    newref->key_for_search.objectid == sc->inum)
 269		sc->self_ref_count += newref->count;
 270}
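/*
 * Example of the transitions above (illustrative; ref_a and ref_b are
 * hypothetical refs for two different inodes, share_count starts at 0):
 *
 *	update_share_count(sc, 0, 1, ref_a);	// 0 -> 1: share_count = 1
 *	update_share_count(sc, 0, 1, ref_b);	// 0 -> 1: share_count = 2
 *	update_share_count(sc, 1, 0, ref_b);	// 1 -> 0: share_count = 1
 *
 * extent_is_shared() reports BACKREF_FOUND_SHARED only while
 * share_count > 1, so a later cancellation (e.g. from a delayed DROP ref)
 * can take the extent back to "not shared".
 */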
 271
 272/*
 273 * Add @newref to the @root rbtree, merging identical refs.
 274 *
 275 * Callers should assume that newref has been freed after calling.
 276 */
 277static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 278			      struct preftree *preftree,
 279			      struct prelim_ref *newref,
 280			      struct share_check *sc)
 281{
 282	struct rb_root_cached *root;
 283	struct rb_node **p;
 284	struct rb_node *parent = NULL;
 285	struct prelim_ref *ref;
 286	int result;
 287	bool leftmost = true;
 288
 289	root = &preftree->root;
 290	p = &root->rb_root.rb_node;
 291
 292	while (*p) {
 293		parent = *p;
 294		ref = rb_entry(parent, struct prelim_ref, rbnode);
 295		result = prelim_ref_compare(ref, newref);
 296		if (result < 0) {
 297			p = &(*p)->rb_left;
 298		} else if (result > 0) {
 299			p = &(*p)->rb_right;
 300			leftmost = false;
 301		} else {
 302			/* Identical refs, merge them and free @newref */
 303			struct extent_inode_elem *eie = ref->inode_list;
 304
 305			while (eie && eie->next)
 306				eie = eie->next;
 307
 308			if (!eie)
 309				ref->inode_list = newref->inode_list;
 310			else
 311				eie->next = newref->inode_list;
 312			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 313						     preftree->count);
 314			/*
 315			 * A delayed ref can have newref->count < 0.
 316			 * The ref->count is updated to follow any
 317			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 318			 */
 319			update_share_count(sc, ref->count,
 320					   ref->count + newref->count, newref);
 321			ref->count += newref->count;
 322			free_pref(newref);
 323			return;
 324		}
 325	}
 326
 327	update_share_count(sc, 0, newref->count, newref);
 328	preftree->count++;
 329	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 330	rb_link_node(&newref->rbnode, parent, p);
 331	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 332}
 333
 334/*
 335 * Release the entire tree.  We don't care about internal consistency so
 336 * just free everything and then reset the tree root.
 337 */
 338static void prelim_release(struct preftree *preftree)
 339{
 340	struct prelim_ref *ref, *next_ref;
 341
 342	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 343					     &preftree->root.rb_root, rbnode) {
 344		free_inode_elem_list(ref->inode_list);
 345		free_pref(ref);
 346	}
 347
 348	preftree->root = RB_ROOT_CACHED;
 349	preftree->count = 0;
 350}
 351
 352/*
 353 * the rules for all callers of this function are:
 354 * - obtaining the parent is the goal
 355 * - if you add a key, you must know that it is a correct key
 356 * - if you cannot add the parent or a correct key, then we will look into the
 357 *   block later to set a correct key
 358 *
 359 * delayed refs
 360 * ============
 361 *        backref type | shared | indirect | shared | indirect
 362 * information         |   tree |     tree |   data |     data
 363 * --------------------+--------+----------+--------+----------
 364 *      parent logical |    y   |     -    |    -   |     -
 365 *      key to resolve |    -   |     y    |    y   |     y
 366 *  tree block logical |    -   |     -    |    -   |     -
 367 *  root for resolving |    y   |     y    |    y   |     y
 368 *
  369 * - column 1:       we have the parent -> done
 370 * - column 2, 3, 4: we use the key to find the parent
 371 *
 372 * on disk refs (inline or keyed)
 373 * ==============================
 374 *        backref type | shared | indirect | shared | indirect
 375 * information         |   tree |     tree |   data |     data
 376 * --------------------+--------+----------+--------+----------
 377 *      parent logical |    y   |     -    |    y   |     -
 378 *      key to resolve |    -   |     -    |    -   |     y
 379 *  tree block logical |    y   |     y    |    y   |     y
 380 *  root for resolving |    -   |     y    |    y   |     y
 381 *
  382 * - column 1, 3: we have the parent -> done
 383 * - column 2:    we take the first key from the block to find the parent
 384 *                (see add_missing_keys)
 385 * - column 4:    we use the key to find the parent
 386 *
 387 * additional information that's available but not required to find the parent
 388 * block might help in merging entries to gain some speed.
 389 */
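/*
 * Example reading of the tables above (illustrative): an on-disk
 * BTRFS_SHARED_BLOCK_REF_KEY gives us the parent logical directly, so it
 * goes through add_direct_ref() below with key == NULL, while an on-disk
 * BTRFS_EXTENT_DATA_REF_KEY only gives us a root and a key, so it goes
 * through add_indirect_ref() and the parent is resolved later by
 * resolve_indirect_refs().
 */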
 390static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 391			  struct preftree *preftree, u64 root_id,
 392			  const struct btrfs_key *key, int level, u64 parent,
 393			  u64 wanted_disk_byte, int count,
 394			  struct share_check *sc, gfp_t gfp_mask)
 395{
 396	struct prelim_ref *ref;
 397
 398	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 399		return 0;
 400
 401	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 402	if (!ref)
 403		return -ENOMEM;
 404
 405	ref->root_id = root_id;
 406	if (key)
 407		ref->key_for_search = *key;
 408	else
 409		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 410
 411	ref->inode_list = NULL;
 412	ref->level = level;
 413	ref->count = count;
 414	ref->parent = parent;
 415	ref->wanted_disk_byte = wanted_disk_byte;
 416	prelim_ref_insert(fs_info, preftree, ref, sc);
 417	return extent_is_shared(sc);
 418}
 419
 420/* direct refs use root == 0, key == NULL */
 421static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 422			  struct preftrees *preftrees, int level, u64 parent,
 423			  u64 wanted_disk_byte, int count,
 424			  struct share_check *sc, gfp_t gfp_mask)
 425{
 426	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 427			      parent, wanted_disk_byte, count, sc, gfp_mask);
 428}
 429
 430/* indirect refs use parent == 0 */
 431static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 432			    struct preftrees *preftrees, u64 root_id,
 433			    const struct btrfs_key *key, int level,
 434			    u64 wanted_disk_byte, int count,
 435			    struct share_check *sc, gfp_t gfp_mask)
 436{
 437	struct preftree *tree = &preftrees->indirect;
 438
 439	if (!key)
 440		tree = &preftrees->indirect_missing_keys;
 441	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 442			      wanted_disk_byte, count, sc, gfp_mask);
 443}
 444
 445static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
 446{
 447	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
 448	struct rb_node *parent = NULL;
 449	struct prelim_ref *ref = NULL;
 450	struct prelim_ref target = {};
 451	int result;
 452
 453	target.parent = bytenr;
 454
 455	while (*p) {
 456		parent = *p;
 457		ref = rb_entry(parent, struct prelim_ref, rbnode);
 458		result = prelim_ref_compare(ref, &target);
 459
 460		if (result < 0)
 461			p = &(*p)->rb_left;
 462		else if (result > 0)
 463			p = &(*p)->rb_right;
 464		else
 465			return 1;
 466	}
 467	return 0;
 468}
 469
 470static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
 471			   struct btrfs_root *root, struct btrfs_path *path,
 472			   struct ulist *parents,
 473			   struct preftrees *preftrees, struct prelim_ref *ref,
 474			   int level)
 475{
 476	int ret = 0;
 477	int slot;
 478	struct extent_buffer *eb;
 479	struct btrfs_key key;
 480	struct btrfs_key *key_for_search = &ref->key_for_search;
 481	struct btrfs_file_extent_item *fi;
 482	struct extent_inode_elem *eie = NULL, *old = NULL;
 483	u64 disk_byte;
 484	u64 wanted_disk_byte = ref->wanted_disk_byte;
 485	u64 count = 0;
 486	u64 data_offset;
 487	u8 type;
 488
 489	if (level != 0) {
 490		eb = path->nodes[level];
 491		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 492		if (ret < 0)
 493			return ret;
 494		return 0;
 495	}
 496
 497	/*
 498	 * 1. We normally enter this function with the path already pointing to
 499	 *    the first item to check. But sometimes, we may enter it with
 500	 *    slot == nritems.
  501	 * 2. We are searching for a normal backref, but the bytenr of this
  502	 *    leaf matches a shared data backref.
  503	 * 3. The leaf owner is not equal to the root we are searching for.
 504	 *
 505	 * For these cases, go to the next leaf before we continue.
 506	 */
 507	eb = path->nodes[0];
 508	if (path->slots[0] >= btrfs_header_nritems(eb) ||
 509	    is_shared_data_backref(preftrees, eb->start) ||
 510	    ref->root_id != btrfs_header_owner(eb)) {
 511		if (ctx->time_seq == BTRFS_SEQ_LAST)
 512			ret = btrfs_next_leaf(root, path);
 513		else
 514			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 515	}
 516
 517	while (!ret && count < ref->count) {
 518		eb = path->nodes[0];
 519		slot = path->slots[0];
 520
 521		btrfs_item_key_to_cpu(eb, &key, slot);
 522
 523		if (key.objectid != key_for_search->objectid ||
 524		    key.type != BTRFS_EXTENT_DATA_KEY)
 525			break;
 526
 527		/*
  528		 * We are searching for a normal backref, but the bytenr of this
  529		 * leaf matches a shared data backref, OR
  530		 * the leaf owner is not equal to the root we are searching for.
 531		 */
 532		if (slot == 0 &&
 533		    (is_shared_data_backref(preftrees, eb->start) ||
 534		     ref->root_id != btrfs_header_owner(eb))) {
 535			if (ctx->time_seq == BTRFS_SEQ_LAST)
 536				ret = btrfs_next_leaf(root, path);
 537			else
 538				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 539			continue;
 540		}
 541		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 542		type = btrfs_file_extent_type(eb, fi);
 543		if (type == BTRFS_FILE_EXTENT_INLINE)
 544			goto next;
 545		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 546		data_offset = btrfs_file_extent_offset(eb, fi);
 547
 548		if (disk_byte == wanted_disk_byte) {
 549			eie = NULL;
 550			old = NULL;
 551			if (ref->key_for_search.offset == key.offset - data_offset)
 552				count++;
 553			else
 554				goto next;
 555			if (!ctx->ignore_extent_item_pos) {
 556				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
 557				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
 558				    ret < 0)
 559					break;
 560			}
 561			if (ret > 0)
 562				goto next;
 563			ret = ulist_add_merge_ptr(parents, eb->start,
 564						  eie, (void **)&old, GFP_NOFS);
 565			if (ret < 0)
 566				break;
 567			if (!ret && !ctx->ignore_extent_item_pos) {
 568				while (old->next)
 569					old = old->next;
 570				old->next = eie;
 571			}
 572			eie = NULL;
 573		}
 574next:
 575		if (ctx->time_seq == BTRFS_SEQ_LAST)
 576			ret = btrfs_next_item(root, path);
 577		else
 578			ret = btrfs_next_old_item(root, path, ctx->time_seq);
 579	}
 580
 581	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 582		free_inode_elem_list(eie);
 583	else if (ret > 0)
 584		ret = 0;
 585
 586	return ret;
 587}
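/*
 * Note on the ulist_add_merge_ptr() call above (behaviour as used
 * throughout this file): it returns 1 when @parents gains a new entry whose
 * aux pointer becomes @eie, and 0 when the bytenr was already present, in
 * which case @old is set to the existing inode list and @eie is appended to
 * its tail.
 */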
 588
 589/*
 590 * resolve an indirect backref in the form (root_id, key, level)
 591 * to a logical address
 592 */
 593static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
 594				struct btrfs_path *path,
 595				struct preftrees *preftrees,
 596				struct prelim_ref *ref, struct ulist *parents)
 597{
 598	struct btrfs_root *root;
 599	struct extent_buffer *eb;
 600	int ret = 0;
 601	int root_level;
 602	int level = ref->level;
 603	struct btrfs_key search_key = ref->key_for_search;
 604
 605	/*
 606	 * If we're search_commit_root we could possibly be holding locks on
  607	 * other tree nodes.  This happens when qgroups do backref walks when
 608	 * adding new delayed refs.  To deal with this we need to look in cache
 609	 * for the root, and if we don't find it then we need to search the
 610	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
 611	 * here.
 612	 */
 613	if (path->search_commit_root)
 614		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
 615	else
 616		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
 617	if (IS_ERR(root)) {
 618		ret = PTR_ERR(root);
 619		goto out_free;
 620	}
 621
 622	if (!path->search_commit_root &&
 623	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
 624		ret = -ENOENT;
 625		goto out;
 626	}
 627
 628	if (btrfs_is_testing(ctx->fs_info)) {
 629		ret = -ENOENT;
 630		goto out;
 631	}
 632
 633	if (path->search_commit_root)
 634		root_level = btrfs_header_level(root->commit_root);
 635	else if (ctx->time_seq == BTRFS_SEQ_LAST)
 636		root_level = btrfs_header_level(root->node);
 637	else
 638		root_level = btrfs_old_root_level(root, ctx->time_seq);
 639
 640	if (root_level + 1 == level)
 641		goto out;
 642
 643	/*
 644	 * We can often find data backrefs with an offset that is too large
 645	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
  646	 * subtracting the data offset of its corresponding extent data item
  647	 * from a file's offset. This can happen for example in the
 648	 * clone ioctl.
 649	 *
 650	 * So if we detect such case we set the search key's offset to zero to
 651	 * make sure we will find the matching file extent item at
 652	 * add_all_parents(), otherwise we will miss it because the offset
  653	 * taken from the backref is much larger than the offset of the file
 654	 * extent item. This can make us scan a very large number of file
 655	 * extent items, but at least it will not make us miss any.
 656	 *
 657	 * This is an ugly workaround for a behaviour that should have never
 658	 * existed, but it does and a fix for the clone ioctl would touch a lot
 659	 * of places, cause backwards incompatibility and would not fix the
 660	 * problem for extents cloned with older kernels.
 661	 */
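	/*
	 * Illustrative numbers for the underflow described above (not from
	 * the original source): cloning an extent so that its data offset
	 * becomes 8K while its file extent item sits at file offset 4K gives
	 * a backref offset of 4K - 8K, which wraps around to a huge u64
	 * value >= LLONG_MAX; searching with it would overshoot every real
	 * file extent item, hence the reset of the search offset to 0 below.
	 */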
 662	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
 663	    search_key.offset >= LLONG_MAX)
 664		search_key.offset = 0;
 665	path->lowest_level = level;
 666	if (ctx->time_seq == BTRFS_SEQ_LAST)
 667		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 668	else
 669		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
 670
 671	btrfs_debug(ctx->fs_info,
 672		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 673		 ref->root_id, level, ref->count, ret,
 674		 ref->key_for_search.objectid, ref->key_for_search.type,
 675		 ref->key_for_search.offset);
 676	if (ret < 0)
 677		goto out;
 678
 679	eb = path->nodes[level];
 680	while (!eb) {
 681		if (WARN_ON(!level)) {
 682			ret = 1;
 683			goto out;
 684		}
 685		level--;
 686		eb = path->nodes[level];
 687	}
 688
 689	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
 690out:
 691	btrfs_put_root(root);
 692out_free:
 693	path->lowest_level = 0;
 694	btrfs_release_path(path);
 695	return ret;
 696}
 697
 698static struct extent_inode_elem *
 699unode_aux_to_inode_list(struct ulist_node *node)
 700{
 701	if (!node)
 702		return NULL;
 703	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 704}
 705
 706static void free_leaf_list(struct ulist *ulist)
 707{
 708	struct ulist_node *node;
 709	struct ulist_iterator uiter;
 710
 711	ULIST_ITER_INIT(&uiter);
 712	while ((node = ulist_next(ulist, &uiter)))
 713		free_inode_elem_list(unode_aux_to_inode_list(node));
 714
 715	ulist_free(ulist);
 716}
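/*
 * Sketch of the ulist iteration idiom used above and throughout this file
 * (a minimal illustration, assuming only the ulist API as used here):
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ulist, &uiter))) {
 *		// node->val holds the bytenr, node->aux the attached
 *		// inode list (see unode_aux_to_inode_list() above)
 *	}
 */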
 717
 718/*
 719 * We maintain three separate rbtrees: one for direct refs, one for
 720 * indirect refs which have a key, and one for indirect refs which do not
 721 * have a key. Each tree does merge on insertion.
 722 *
 723 * Once all of the references are located, we iterate over the tree of
 724 * indirect refs with missing keys. An appropriate key is located and
 725 * the ref is moved onto the tree for indirect refs. After all missing
 726 * keys are thus located, we iterate over the indirect ref tree, resolve
 727 * each reference, and then insert the resolved reference onto the
 728 * direct tree (merging there too).
 729 *
 730 * New backrefs (i.e., for parent nodes) are added to the appropriate
 731 * rbtree as they are encountered. The new backrefs are subsequently
 732 * resolved as above.
 733 */
 734static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
 735				 struct btrfs_path *path,
 736				 struct preftrees *preftrees,
 737				 struct share_check *sc)
 738{
 739	int err;
 740	int ret = 0;
 741	struct ulist *parents;
 742	struct ulist_node *node;
 743	struct ulist_iterator uiter;
 744	struct rb_node *rnode;
 745
 746	parents = ulist_alloc(GFP_NOFS);
 747	if (!parents)
 748		return -ENOMEM;
 749
 750	/*
 751	 * We could trade memory usage for performance here by iterating
 752	 * the tree, allocating new refs for each insertion, and then
 753	 * freeing the entire indirect tree when we're done.  In some test
 754	 * cases, the tree can grow quite large (~200k objects).
 755	 */
 756	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 757		struct prelim_ref *ref;
 758
 759		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 760		if (WARN(ref->parent,
 761			 "BUG: direct ref found in indirect tree")) {
 762			ret = -EINVAL;
 763			goto out;
 764		}
 765
 766		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 767		preftrees->indirect.count--;
 768
 769		if (ref->count == 0) {
 770			free_pref(ref);
 771			continue;
 772		}
 773
 774		if (sc && ref->root_id != sc->root->root_key.objectid) {
 775			free_pref(ref);
 776			ret = BACKREF_FOUND_SHARED;
 777			goto out;
 778		}
 779		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
 780		/*
  781		 * We can only tolerate -ENOENT; otherwise, we should catch the
  782		 * error and return directly.
 783		 */
 784		if (err == -ENOENT) {
 785			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
 786					  NULL);
 787			continue;
 788		} else if (err) {
 789			free_pref(ref);
 790			ret = err;
 791			goto out;
 792		}
 793
 794		/* we put the first parent into the ref at hand */
 795		ULIST_ITER_INIT(&uiter);
 796		node = ulist_next(parents, &uiter);
 797		ref->parent = node ? node->val : 0;
 798		ref->inode_list = unode_aux_to_inode_list(node);
 799
 800		/* Add a prelim_ref(s) for any other parent(s). */
 801		while ((node = ulist_next(parents, &uiter))) {
 802			struct prelim_ref *new_ref;
 803
 804			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 805						   GFP_NOFS);
 806			if (!new_ref) {
 807				free_pref(ref);
 808				ret = -ENOMEM;
 809				goto out;
 810			}
 811			memcpy(new_ref, ref, sizeof(*ref));
 812			new_ref->parent = node->val;
 813			new_ref->inode_list = unode_aux_to_inode_list(node);
 814			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
 815					  new_ref, NULL);
 816		}
 817
 818		/*
 819		 * Now it's a direct ref, put it in the direct tree. We must
 820		 * do this last because the ref could be merged/freed here.
 821		 */
 822		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
 823
 824		ulist_reinit(parents);
 825		cond_resched();
 826	}
 827out:
 828	/*
 829	 * We may have inode lists attached to refs in the parents ulist, so we
 830	 * must free them before freeing the ulist and its refs.
 831	 */
 832	free_leaf_list(parents);
 833	return ret;
 834}
 835
 836/*
 837 * read tree blocks and add keys where required.
 838 */
 839static int add_missing_keys(struct btrfs_fs_info *fs_info,
 840			    struct preftrees *preftrees, bool lock)
 841{
 842	struct prelim_ref *ref;
 843	struct extent_buffer *eb;
 844	struct preftree *tree = &preftrees->indirect_missing_keys;
 845	struct rb_node *node;
 846
 847	while ((node = rb_first_cached(&tree->root))) {
 848		struct btrfs_tree_parent_check check = { 0 };
 849
 850		ref = rb_entry(node, struct prelim_ref, rbnode);
 851		rb_erase_cached(node, &tree->root);
 852
 853		BUG_ON(ref->parent);	/* should not be a direct ref */
 854		BUG_ON(ref->key_for_search.type);
 855		BUG_ON(!ref->wanted_disk_byte);
 856
 857		check.level = ref->level - 1;
 858		check.owner_root = ref->root_id;
 859
 860		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
 861		if (IS_ERR(eb)) {
 862			free_pref(ref);
 863			return PTR_ERR(eb);
 864		}
 865		if (!extent_buffer_uptodate(eb)) {
 866			free_pref(ref);
 867			free_extent_buffer(eb);
 868			return -EIO;
 869		}
 870
 871		if (lock)
 872			btrfs_tree_read_lock(eb);
 873		if (btrfs_header_level(eb) == 0)
 874			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 875		else
 876			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 877		if (lock)
 878			btrfs_tree_read_unlock(eb);
 879		free_extent_buffer(eb);
 880		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 881		cond_resched();
 882	}
 883	return 0;
 884}
 885
 886/*
  887 * Add all currently queued delayed refs from this head whose seq nr is
  888 * smaller than or equal to @seq to the list.
 889 */
 890static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 891			    struct btrfs_delayed_ref_head *head, u64 seq,
 892			    struct preftrees *preftrees, struct share_check *sc)
 893{
 894	struct btrfs_delayed_ref_node *node;
 895	struct btrfs_key key;
 896	struct rb_node *n;
 897	int count;
 898	int ret = 0;
 899
 900	spin_lock(&head->lock);
 901	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 902		node = rb_entry(n, struct btrfs_delayed_ref_node,
 903				ref_node);
 904		if (node->seq > seq)
 905			continue;
 906
 907		switch (node->action) {
 908		case BTRFS_ADD_DELAYED_EXTENT:
 909		case BTRFS_UPDATE_DELAYED_HEAD:
 910			WARN_ON(1);
 911			continue;
 912		case BTRFS_ADD_DELAYED_REF:
 913			count = node->ref_mod;
 914			break;
 915		case BTRFS_DROP_DELAYED_REF:
 916			count = node->ref_mod * -1;
 917			break;
 918		default:
 919			BUG();
 920		}
 921		switch (node->type) {
 922		case BTRFS_TREE_BLOCK_REF_KEY: {
 923			/* NORMAL INDIRECT METADATA backref */
 924			struct btrfs_delayed_tree_ref *ref;
 925			struct btrfs_key *key_ptr = NULL;
 926
 927			if (head->extent_op && head->extent_op->update_key) {
 928				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
 929				key_ptr = &key;
 930			}
 931
 932			ref = btrfs_delayed_node_to_tree_ref(node);
 933			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 934					       key_ptr, ref->level + 1,
 935					       node->bytenr, count, sc,
 936					       GFP_ATOMIC);
 937			break;
 938		}
 939		case BTRFS_SHARED_BLOCK_REF_KEY: {
 940			/* SHARED DIRECT METADATA backref */
 941			struct btrfs_delayed_tree_ref *ref;
 942
 943			ref = btrfs_delayed_node_to_tree_ref(node);
 944
 945			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
 946					     ref->parent, node->bytenr, count,
 947					     sc, GFP_ATOMIC);
 948			break;
 949		}
 950		case BTRFS_EXTENT_DATA_REF_KEY: {
 951			/* NORMAL INDIRECT DATA backref */
 952			struct btrfs_delayed_data_ref *ref;
 953			ref = btrfs_delayed_node_to_data_ref(node);
 954
 955			key.objectid = ref->objectid;
 956			key.type = BTRFS_EXTENT_DATA_KEY;
 957			key.offset = ref->offset;
 958
 959			/*
 960			 * If we have a share check context and a reference for
 961			 * another inode, we can't exit immediately. This is
 962			 * because even if this is a BTRFS_ADD_DELAYED_REF
 963			 * reference we may find next a BTRFS_DROP_DELAYED_REF
 964			 * which cancels out this ADD reference.
 965			 *
 966			 * If this is a DROP reference and there was no previous
 967			 * ADD reference, then we need to signal that when we
 968			 * process references from the extent tree (through
 969			 * add_inline_refs() and add_keyed_refs()), we should
 970			 * not exit early if we find a reference for another
 971			 * inode, because one of the delayed DROP references
 972			 * may cancel that reference in the extent tree.
 973			 */
 974			if (sc && count < 0)
 975				sc->have_delayed_delete_refs = true;
 976
 977			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 978					       &key, 0, node->bytenr, count, sc,
 979					       GFP_ATOMIC);
 980			break;
 981		}
 982		case BTRFS_SHARED_DATA_REF_KEY: {
 983			/* SHARED DIRECT FULL backref */
 984			struct btrfs_delayed_data_ref *ref;
 985
 986			ref = btrfs_delayed_node_to_data_ref(node);
 987
 988			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
 989					     node->bytenr, count, sc,
 990					     GFP_ATOMIC);
 991			break;
 992		}
 993		default:
 994			WARN_ON(1);
 995		}
 996		/*
 997		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 998		 * refs have been checked.
 999		 */
1000		if (ret && (ret != BACKREF_FOUND_SHARED))
1001			break;
1002	}
1003	if (!ret)
1004		ret = extent_is_shared(sc);
1005
1006	spin_unlock(&head->lock);
1007	return ret;
1008}
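/*
 * Example of the count bookkeeping above (illustrative): a delayed
 * BTRFS_ADD_DELAYED_REF with ref_mod = 1 contributes count = +1 and a later
 * BTRFS_DROP_DELAYED_REF for the same ref contributes count = -1; when both
 * land in the same preftree, prelim_ref_insert() merges them to a net
 * ref->count of 0, which the resolution code then skips.
 */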
1009
1010/*
1011 * add all inline backrefs for bytenr to the list
1012 *
1013 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1014 */
1015static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
1016			   struct btrfs_path *path,
1017			   int *info_level, struct preftrees *preftrees,
1018			   struct share_check *sc)
1019{
1020	int ret = 0;
1021	int slot;
1022	struct extent_buffer *leaf;
1023	struct btrfs_key key;
1024	struct btrfs_key found_key;
1025	unsigned long ptr;
1026	unsigned long end;
1027	struct btrfs_extent_item *ei;
1028	u64 flags;
1029	u64 item_size;
1030
1031	/*
1032	 * enumerate all inline refs
1033	 */
1034	leaf = path->nodes[0];
1035	slot = path->slots[0];
1036
1037	item_size = btrfs_item_size(leaf, slot);
1038	BUG_ON(item_size < sizeof(*ei));
1039
1040	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1041
1042	if (ctx->check_extent_item) {
1043		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
1044		if (ret)
1045			return ret;
1046	}
1047
1048	flags = btrfs_extent_flags(leaf, ei);
1049	btrfs_item_key_to_cpu(leaf, &found_key, slot);
1050
1051	ptr = (unsigned long)(ei + 1);
1052	end = (unsigned long)ei + item_size;
1053
1054	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1055	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1056		struct btrfs_tree_block_info *info;
1057
1058		info = (struct btrfs_tree_block_info *)ptr;
1059		*info_level = btrfs_tree_block_level(leaf, info);
1060		ptr += sizeof(struct btrfs_tree_block_info);
1061		BUG_ON(ptr > end);
1062	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
1063		*info_level = found_key.offset;
1064	} else {
1065		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1066	}
1067
1068	while (ptr < end) {
1069		struct btrfs_extent_inline_ref *iref;
1070		u64 offset;
1071		int type;
1072
1073		iref = (struct btrfs_extent_inline_ref *)ptr;
1074		type = btrfs_get_extent_inline_ref_type(leaf, iref,
1075							BTRFS_REF_TYPE_ANY);
1076		if (type == BTRFS_REF_TYPE_INVALID)
1077			return -EUCLEAN;
1078
1079		offset = btrfs_extent_inline_ref_offset(leaf, iref);
1080
1081		switch (type) {
1082		case BTRFS_SHARED_BLOCK_REF_KEY:
1083			ret = add_direct_ref(ctx->fs_info, preftrees,
1084					     *info_level + 1, offset,
1085					     ctx->bytenr, 1, NULL, GFP_NOFS);
1086			break;
1087		case BTRFS_SHARED_DATA_REF_KEY: {
1088			struct btrfs_shared_data_ref *sdref;
1089			int count;
1090
1091			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1092			count = btrfs_shared_data_ref_count(leaf, sdref);
1093
1094			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
1095					     ctx->bytenr, count, sc, GFP_NOFS);
1096			break;
1097		}
1098		case BTRFS_TREE_BLOCK_REF_KEY:
1099			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
1100					       NULL, *info_level + 1,
1101					       ctx->bytenr, 1, NULL, GFP_NOFS);
1102			break;
1103		case BTRFS_EXTENT_DATA_REF_KEY: {
1104			struct btrfs_extent_data_ref *dref;
1105			int count;
1106			u64 root;
1107
1108			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1109			count = btrfs_extent_data_ref_count(leaf, dref);
1110			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1111								      dref);
1112			key.type = BTRFS_EXTENT_DATA_KEY;
1113			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1114
1115			if (sc && key.objectid != sc->inum &&
1116			    !sc->have_delayed_delete_refs) {
1117				ret = BACKREF_FOUND_SHARED;
1118				break;
1119			}
1120
1121			root = btrfs_extent_data_ref_root(leaf, dref);
1122
1123			if (!ctx->skip_data_ref ||
1124			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1125						ctx->user_ctx))
1126				ret = add_indirect_ref(ctx->fs_info, preftrees,
1127						       root, &key, 0, ctx->bytenr,
1128						       count, sc, GFP_NOFS);
1129			break;
1130		}
1131		default:
1132			WARN_ON(1);
1133		}
1134		if (ret)
1135			return ret;
1136		ptr += btrfs_extent_inline_ref_size(type);
1137	}
1138
1139	return 0;
1140}
1141
1142/*
1143 * add all non-inline backrefs for bytenr to the list
1144 *
1145 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1146 */
1147static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
1148			  struct btrfs_root *extent_root,
1149			  struct btrfs_path *path,
1150			  int info_level, struct preftrees *preftrees,
1151			  struct share_check *sc)
1152{
1153	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1154	int ret;
1155	int slot;
1156	struct extent_buffer *leaf;
1157	struct btrfs_key key;
1158
1159	while (1) {
1160		ret = btrfs_next_item(extent_root, path);
1161		if (ret < 0)
1162			break;
1163		if (ret) {
1164			ret = 0;
1165			break;
1166		}
1167
1168		slot = path->slots[0];
1169		leaf = path->nodes[0];
1170		btrfs_item_key_to_cpu(leaf, &key, slot);
1171
1172		if (key.objectid != ctx->bytenr)
1173			break;
1174		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1175			continue;
1176		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1177			break;
1178
1179		switch (key.type) {
1180		case BTRFS_SHARED_BLOCK_REF_KEY:
1181			/* SHARED DIRECT METADATA backref */
1182			ret = add_direct_ref(fs_info, preftrees,
1183					     info_level + 1, key.offset,
1184					     ctx->bytenr, 1, NULL, GFP_NOFS);
1185			break;
1186		case BTRFS_SHARED_DATA_REF_KEY: {
1187			/* SHARED DIRECT FULL backref */
1188			struct btrfs_shared_data_ref *sdref;
1189			int count;
1190
1191			sdref = btrfs_item_ptr(leaf, slot,
1192					      struct btrfs_shared_data_ref);
1193			count = btrfs_shared_data_ref_count(leaf, sdref);
1194			ret = add_direct_ref(fs_info, preftrees, 0,
1195					     key.offset, ctx->bytenr, count,
1196					     sc, GFP_NOFS);
1197			break;
1198		}
1199		case BTRFS_TREE_BLOCK_REF_KEY:
1200			/* NORMAL INDIRECT METADATA backref */
1201			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1202					       NULL, info_level + 1, ctx->bytenr,
1203					       1, NULL, GFP_NOFS);
1204			break;
1205		case BTRFS_EXTENT_DATA_REF_KEY: {
1206			/* NORMAL INDIRECT DATA backref */
1207			struct btrfs_extent_data_ref *dref;
1208			int count;
1209			u64 root;
1210
1211			dref = btrfs_item_ptr(leaf, slot,
1212					      struct btrfs_extent_data_ref);
1213			count = btrfs_extent_data_ref_count(leaf, dref);
1214			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1215								      dref);
1216			key.type = BTRFS_EXTENT_DATA_KEY;
1217			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1218
1219			if (sc && key.objectid != sc->inum &&
1220			    !sc->have_delayed_delete_refs) {
1221				ret = BACKREF_FOUND_SHARED;
1222				break;
1223			}
1224
1225			root = btrfs_extent_data_ref_root(leaf, dref);
1226
1227			if (!ctx->skip_data_ref ||
1228			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1229						ctx->user_ctx))
1230				ret = add_indirect_ref(fs_info, preftrees, root,
1231						       &key, 0, ctx->bytenr,
1232						       count, sc, GFP_NOFS);
1233			break;
1234		}
1235		default:
1236			WARN_ON(1);
1237		}
1238		if (ret)
1239			return ret;
1240
1241	}
1242
1243	return ret;
1244}
1245
1246/*
1247 * The caller has joined a transaction or is holding a read lock on the
1248 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1249 * snapshot field changing while updating or checking the cache.
1250 */
1251static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1252					struct btrfs_root *root,
1253					u64 bytenr, int level, bool *is_shared)
1254{
1255	struct btrfs_backref_shared_cache_entry *entry;
1256
1257	if (!ctx->use_path_cache)
1258		return false;
1259
1260	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1261		return false;
1262
1263	/*
1264	 * Level -1 is used for the data extent, which is not reliable to cache
1265	 * because its reference count can increase or decrease without us
1266	 * realizing. We cache results only for extent buffers that lead from
1267	 * the root node down to the leaf with the file extent item.
1268	 */
1269	ASSERT(level >= 0);
1270
1271	entry = &ctx->path_cache_entries[level];
1272
1273	/* Unused cache entry or being used for some other extent buffer. */
1274	if (entry->bytenr != bytenr)
1275		return false;
1276
1277	/*
1278	 * We cached a false result, but the last snapshot generation of the
1279	 * root changed, so we now have a snapshot. Don't trust the result.
1280	 */
1281	if (!entry->is_shared &&
1282	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
1283		return false;
1284
1285	/*
1286	 * If we cached a true result and the last generation used for dropping
1287	 * a root changed, we can not trust the result, because the dropped root
1288	 * could be a snapshot sharing this extent buffer.
1289	 */
1290	if (entry->is_shared &&
1291	    entry->gen != btrfs_get_last_root_drop_gen(root->fs_info))
1292		return false;
1293
1294	*is_shared = entry->is_shared;
1295	/*
 1296	 * If the node at this level is shared, then all nodes below are also
1297	 * shared. Currently some of the nodes below may be marked as not shared
1298	 * because we have just switched from one leaf to another, and switched
1299	 * also other nodes above the leaf and below the current level, so mark
1300	 * them as shared.
1301	 */
1302	if (*is_shared) {
1303		for (int i = 0; i < level; i++) {
1304			ctx->path_cache_entries[i].is_shared = true;
1305			ctx->path_cache_entries[i].gen = entry->gen;
1306		}
1307	}
1308
1309	return true;
1310}
1311
1312/*
1313 * The caller has joined a transaction or is holding a read lock on the
1314 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1315 * snapshot field changing while updating or checking the cache.
1316 */
1317static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1318				       struct btrfs_root *root,
1319				       u64 bytenr, int level, bool is_shared)
1320{
1321	struct btrfs_backref_shared_cache_entry *entry;
1322	u64 gen;
1323
1324	if (!ctx->use_path_cache)
1325		return;
1326
1327	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1328		return;
1329
1330	/*
1331	 * Level -1 is used for the data extent, which is not reliable to cache
1332	 * because its reference count can increase or decrease without us
1333	 * realizing. We cache results only for extent buffers that lead from
1334	 * the root node down to the leaf with the file extent item.
1335	 */
1336	ASSERT(level >= 0);
1337
1338	if (is_shared)
1339		gen = btrfs_get_last_root_drop_gen(root->fs_info);
1340	else
1341		gen = btrfs_root_last_snapshot(&root->root_item);
1342
1343	entry = &ctx->path_cache_entries[level];
1344	entry->bytenr = bytenr;
1345	entry->is_shared = is_shared;
1346	entry->gen = gen;
1347
1348	/*
1349	 * If we found an extent buffer is shared, set the cache result for all
1350	 * extent buffers below it to true. As nodes in the path are COWed,
1351	 * their sharedness is moved to their children, and if a leaf is COWed,
 1352	 * then the sharedness of a data extent becomes direct, i.e. the
 1353	 * refcount of the data extent is increased in the extent item in the extent tree.
1354	 */
1355	if (is_shared) {
1356		for (int i = 0; i < level; i++) {
1357			entry = &ctx->path_cache_entries[i];
1358			entry->is_shared = is_shared;
1359			entry->gen = gen;
1360		}
1361	}
1362}
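/*
 * Usage sketch for the pair of helpers above (illustrative; @eb and @level
 * are hypothetical locals of a caller):
 *
 *	bool is_shared;
 *
 *	if (lookup_backref_shared_cache(ctx, root, eb->start, level,
 *					&is_shared))
 *		return is_shared ? BACKREF_FOUND_SHARED
 *				 : BACKREF_FOUND_NOT_SHARED;
 *	// ...do the full backref walk to compute is_shared...
 *	store_backref_shared_cache(ctx, root, eb->start, level, is_shared);
 */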
1363
1364/*
 1365 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 1366 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 1367 * indirect refs to their parent bytenr.
 1368 * When roots are found, they're added to the roots list.
1369 *
1370 * @ctx:     Backref walking context object, must be not NULL.
1371 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
1372 *           shared extent is detected.
1373 *
1374 * Otherwise this returns 0 for success and <0 for an error.
1375 *
1376 * FIXME some caching might speed things up
1377 */
1378static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
1379			     struct share_check *sc)
1380{
1381	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
1382	struct btrfs_key key;
1383	struct btrfs_path *path;
1384	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1385	struct btrfs_delayed_ref_head *head;
1386	int info_level = 0;
1387	int ret;
1388	struct prelim_ref *ref;
1389	struct rb_node *node;
1390	struct extent_inode_elem *eie = NULL;
1391	struct preftrees preftrees = {
1392		.direct = PREFTREE_INIT,
1393		.indirect = PREFTREE_INIT,
1394		.indirect_missing_keys = PREFTREE_INIT
1395	};
1396
1397	/* Roots ulist is not needed when using a sharedness check context. */
1398	if (sc)
1399		ASSERT(ctx->roots == NULL);
1400
1401	key.objectid = ctx->bytenr;
1402	key.offset = (u64)-1;
1403	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
1404		key.type = BTRFS_METADATA_ITEM_KEY;
1405	else
1406		key.type = BTRFS_EXTENT_ITEM_KEY;
1407
1408	path = btrfs_alloc_path();
1409	if (!path)
1410		return -ENOMEM;
1411	if (!ctx->trans) {
1412		path->search_commit_root = 1;
1413		path->skip_locking = 1;
1414	}
1415
1416	if (ctx->time_seq == BTRFS_SEQ_LAST)
1417		path->skip_locking = 1;
1418
1419again:
1420	head = NULL;
1421
1422	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1423	if (ret < 0)
1424		goto out;
1425	if (ret == 0) {
1426		/* This shouldn't happen, indicates a bug or fs corruption. */
1427		ASSERT(ret != 0);
1428		ret = -EUCLEAN;
1429		goto out;
1430	}
1431
1432	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
1433	    ctx->time_seq != BTRFS_SEQ_LAST) {
1434		/*
1435		 * We have a specific time_seq we care about and trans which
1436		 * means we have the path lock, we need to grab the ref head and
1437		 * lock it so we have a consistent view of the refs at the given
1438		 * time.
1439		 */
1440		delayed_refs = &ctx->trans->transaction->delayed_refs;
1441		spin_lock(&delayed_refs->lock);
1442		head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
1443		if (head) {
1444			if (!mutex_trylock(&head->mutex)) {
1445				refcount_inc(&head->refs);
1446				spin_unlock(&delayed_refs->lock);
1447
1448				btrfs_release_path(path);
1449
1450				/*
1451				 * Mutex was contended, block until it's
1452				 * released and try again
1453				 */
1454				mutex_lock(&head->mutex);
1455				mutex_unlock(&head->mutex);
1456				btrfs_put_delayed_ref_head(head);
1457				goto again;
1458			}
1459			spin_unlock(&delayed_refs->lock);
1460			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
1461					       &preftrees, sc);
1462			mutex_unlock(&head->mutex);
1463			if (ret)
1464				goto out;
1465		} else {
1466			spin_unlock(&delayed_refs->lock);
1467		}
1468	}
1469
1470	if (path->slots[0]) {
1471		struct extent_buffer *leaf;
1472		int slot;
1473
1474		path->slots[0]--;
1475		leaf = path->nodes[0];
1476		slot = path->slots[0];
1477		btrfs_item_key_to_cpu(leaf, &key, slot);
1478		if (key.objectid == ctx->bytenr &&
1479		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1480		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1481			ret = add_inline_refs(ctx, path, &info_level,
1482					      &preftrees, sc);
1483			if (ret)
1484				goto out;
1485			ret = add_keyed_refs(ctx, root, path, info_level,
1486					     &preftrees, sc);
1487			if (ret)
1488				goto out;
1489		}
1490	}
1491
1492	/*
1493	 * If we have a share context and we reached here, it means the extent
1494	 * is not directly shared (no multiple reference items for it),
1495	 * otherwise we would have exited earlier with a return value of
1496	 * BACKREF_FOUND_SHARED after processing delayed references or while
1497	 * processing inline or keyed references from the extent tree.
1498	 * The extent may however be indirectly shared through shared subtrees
 1499	 * as a result of creating snapshots, so we determine below what is
1500	 * its parent node, in case we are dealing with a metadata extent, or
1501	 * what's the leaf (or leaves), from a fs tree, that has a file extent
1502	 * item pointing to it in case we are dealing with a data extent.
1503	 */
1504	ASSERT(extent_is_shared(sc) == 0);
1505
1506	/*
1507	 * If we are here for a data extent and we have a share_check structure
1508	 * it means the data extent is not directly shared (does not have
1509	 * multiple reference items), so we have to check if a path in the fs
1510	 * tree (going from the root node down to the leaf that has the file
1511	 * extent item pointing to the data extent) is shared, that is, if any
1512	 * of the extent buffers in the path is referenced by other trees.
1513	 */
1514	if (sc && ctx->bytenr == sc->data_bytenr) {
1515		/*
1516		 * If our data extent is from a generation more recent than the
1517		 * last generation used to snapshot the root, then we know that
1518		 * it can not be shared through subtrees, so we can skip
1519		 * resolving indirect references, there's no point in
1520		 * determining the extent buffers for the path from the fs tree
1521		 * root node down to the leaf that has the file extent item that
1522		 * points to the data extent.
1523		 */
1524		if (sc->data_extent_gen >
1525		    btrfs_root_last_snapshot(&sc->root->root_item)) {
1526			ret = BACKREF_FOUND_NOT_SHARED;
1527			goto out;
1528		}
1529
1530		/*
1531		 * If we are only determining if a data extent is shared or not
1532		 * and the corresponding file extent item is located in the same
1533		 * leaf as the previous file extent item, we can skip resolving
1534		 * indirect references for a data extent, since the fs tree path
1535		 * is the same (same leaf, so same path). We skip as long as the
1536		 * cached result for the leaf is valid and only if there's only
1537		 * one file extent item pointing to the data extent, because in
1538		 * the case of multiple file extent items, they may be located
1539		 * in different leaves and therefore we have multiple paths.
1540		 */
1541		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
1542		    sc->self_ref_count == 1) {
1543			bool cached;
1544			bool is_shared;
1545
1546			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
1547						     sc->ctx->curr_leaf_bytenr,
1548						     0, &is_shared);
1549			if (cached) {
1550				if (is_shared)
1551					ret = BACKREF_FOUND_SHARED;
1552				else
1553					ret = BACKREF_FOUND_NOT_SHARED;
1554				goto out;
1555			}
1556		}
1557	}
1558
1559	btrfs_release_path(path);
1560
1561	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
1562	if (ret)
1563		goto out;
1564
1565	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1566
1567	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
1568	if (ret)
1569		goto out;
1570
1571	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1572
1573	/*
1574	 * This walks the tree of merged and resolved refs. Tree blocks are
1575	 * read in as needed. Unique entries are added to the ulist, and
1576	 * the list of found roots is updated.
1577	 *
1578	 * We release the entire tree in one go before returning.
1579	 */
1580	node = rb_first_cached(&preftrees.direct.root);
1581	while (node) {
1582		ref = rb_entry(node, struct prelim_ref, rbnode);
1583		node = rb_next(&ref->rbnode);
1584		/*
1585		 * ref->count < 0 can happen here if there are delayed
1586		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1587		 * prelim_ref_insert() relies on this when merging
1588		 * identical refs to keep the overall count correct.
1589		 * prelim_ref_insert() will merge only those refs
1590		 * which compare identically.  Any refs having
1591		 * e.g. different offsets would not be merged,
1592		 * and would retain their original ref->count < 0.
1593		 */
1594		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
1595			/* no parent == root of tree */
1596			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
1597			if (ret < 0)
1598				goto out;
1599		}
1600		if (ref->count && ref->parent) {
1601			if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
1602			    ref->level == 0) {
1603				struct btrfs_tree_parent_check check = { 0 };
1604				struct extent_buffer *eb;
1605
1606				check.level = ref->level;
1607
1608				eb = read_tree_block(ctx->fs_info, ref->parent,
1609						     &check);
1610				if (IS_ERR(eb)) {
1611					ret = PTR_ERR(eb);
1612					goto out;
1613				}
1614				if (!extent_buffer_uptodate(eb)) {
1615					free_extent_buffer(eb);
1616					ret = -EIO;
1617					goto out;
1618				}
1619
1620				if (!path->skip_locking)
1621					btrfs_tree_read_lock(eb);
1622				ret = find_extent_in_eb(ctx, eb, &eie);
1623				if (!path->skip_locking)
1624					btrfs_tree_read_unlock(eb);
1625				free_extent_buffer(eb);
1626				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1627				    ret < 0)
1628					goto out;
1629				ref->inode_list = eie;
1630				/*
1631				 * We transferred the list ownership to the ref,
1632				 * so set to NULL to avoid a double free in case
1633				 * an error happens after this.
1634				 */
1635				eie = NULL;
1636			}
1637			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1638						  ref->inode_list,
1639						  (void **)&eie, GFP_NOFS);
1640			if (ret < 0)
1641				goto out;
1642			if (!ret && !ctx->ignore_extent_item_pos) {
1643				/*
1644				 * We've recorded that parent, so we must extend
1645				 * its inode list here.
1646				 *
1647				 * However if there was corruption we may not
1648				 * have found an eie, return an error in this
1649				 * case.
1650				 */
1651				ASSERT(eie);
1652				if (!eie) {
1653					ret = -EUCLEAN;
1654					goto out;
1655				}
1656				while (eie->next)
1657					eie = eie->next;
1658				eie->next = ref->inode_list;
1659			}
1660			eie = NULL;
1661			/*
1662			 * We have transferred the inode list ownership from
1663			 * this ref to the ref we added to the 'refs' ulist.
1664			 * So set this ref's inode list to NULL to avoid
1665			 * use-after-free when our caller uses it or double
1666			 * frees in case an error happens before we return.
1667			 */
1668			ref->inode_list = NULL;
1669		}
1670		cond_resched();
1671	}
1672
1673out:
1674	btrfs_free_path(path);
1675
1676	prelim_release(&preftrees.direct);
1677	prelim_release(&preftrees.indirect);
1678	prelim_release(&preftrees.indirect_missing_keys);
1679
1680	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
1681		free_inode_elem_list(eie);
1682	return ret;
1683}
1684
1685/*
1686 * Finds all leaves with a reference to the specified combination of
 1687 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 1688 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 1689 * function. The caller should free the ulist with free_leaf_list() if
 1690 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1691 * enough.
1692 *
1693 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1694 */
1695int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
1696{
1697	int ret;
1698
1699	ASSERT(ctx->refs == NULL);
1700
1701	ctx->refs = ulist_alloc(GFP_NOFS);
1702	if (!ctx->refs)
1703		return -ENOMEM;
1704
1705	ret = find_parent_nodes(ctx, NULL);
1706	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1707	    (ret < 0 && ret != -ENOENT)) {
1708		free_leaf_list(ctx->refs);
1709		ctx->refs = NULL;
1710		return ret;
1711	}
1712
1713	return 0;
1714}
1715
1716/*
1717 * Walk all backrefs for a given extent to find all roots that reference this
1718 * extent. Walking a backref means finding all extents that reference this
1719 * extent and in turn walk the backrefs of those, too. Naturally this is a
1720 * recursive process, but here it is implemented in an iterative fashion: We
1721 * find all referencing extents for the extent in question and put them on a
1722 * list. In turn, we find all referencing extents for those, further appending
1723 * to the list. The way we iterate the list allows adding more elements after
 1724 * the current one while iterating. The process stops when we reach the end of the
1725 * list.
1726 *
1727 * Found roots are added to @ctx->roots, which is allocated by this function if
1728 * it points to NULL, in which case the caller is responsible for freeing it
1729 * after it's not needed anymore.
1730 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1731 * ulist to do temporary work, and frees it before returning.
1732 *
1733 * Returns 0 on success, < 0 on error.
1734 */
1735static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1736{
1737	const u64 orig_bytenr = ctx->bytenr;
1738	const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos;
1739	bool roots_ulist_allocated = false;
1740	struct ulist_iterator uiter;
1741	int ret = 0;
1742
1743	ASSERT(ctx->refs == NULL);
1744
1745	ctx->refs = ulist_alloc(GFP_NOFS);
1746	if (!ctx->refs)
1747		return -ENOMEM;
1748
1749	if (!ctx->roots) {
1750		ctx->roots = ulist_alloc(GFP_NOFS);
1751		if (!ctx->roots) {
1752			ulist_free(ctx->refs);
1753			ctx->refs = NULL;
1754			return -ENOMEM;
1755		}
1756		roots_ulist_allocated = true;
1757	}
1758
1759	ctx->ignore_extent_item_pos = true;
1760
1761	ULIST_ITER_INIT(&uiter);
1762	while (1) {
1763		struct ulist_node *node;
1764
1765		ret = find_parent_nodes(ctx, NULL);
1766		if (ret < 0 && ret != -ENOENT) {
1767			if (roots_ulist_allocated) {
1768				ulist_free(ctx->roots);
1769				ctx->roots = NULL;
1770			}
1771			break;
1772		}
1773		ret = 0;
1774		node = ulist_next(ctx->refs, &uiter);
1775		if (!node)
1776			break;
1777		ctx->bytenr = node->val;
1778		cond_resched();
1779	}
1780
1781	ulist_free(ctx->refs);
1782	ctx->refs = NULL;
1783	ctx->bytenr = orig_bytenr;
1784	ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos;
1785
1786	return ret;
1787}
1788
1789int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1790			 bool skip_commit_root_sem)
1791{
1792	int ret;
1793
1794	if (!ctx->trans && !skip_commit_root_sem)
1795		down_read(&ctx->fs_info->commit_root_sem);
1796	ret = btrfs_find_all_roots_safe(ctx);
1797	if (!ctx->trans && !skip_commit_root_sem)
1798		up_read(&ctx->fs_info->commit_root_sem);
1799	return ret;
1800}
1801
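/*
 * A hedged sketch of a btrfs_find_all_roots() caller (illustrative only;
 * fs_info and extent_bytenr are assumed): collect every root that references
 * a given extent and log it.
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = extent_bytenr;
 *
 *	// no transaction handle is set, so the helper takes commit_root_sem
 *	ret = btrfs_find_all_roots(&walk_ctx, false);
 *	if (ret)
 *		return ret;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(walk_ctx.roots, &uiter)))
 *		pr_debug("extent %llu referenced by root %llu\n",
 *			 extent_bytenr, node->val);
 *
 *	// walk_ctx.roots was allocated by the call, the caller frees it
 *	ulist_free(walk_ctx.roots);
 */
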
1802struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1803{
1804	struct btrfs_backref_share_check_ctx *ctx;
1805
1806	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1807	if (!ctx)
1808		return NULL;
1809
1810	ulist_init(&ctx->refs);
1811
1812	return ctx;
1813}
1814
1815void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1816{
1817	if (!ctx)
1818		return;
1819
1820	ulist_release(&ctx->refs);
1821	kfree(ctx);
1822}
1823
1824/*
1825 * Check if a data extent is shared or not.
1826 *
1827 * @inode:       The inode whose extent we are checking.
1828 * @bytenr:      Logical bytenr of the extent we are checking.
1829 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1830 *               not known.
1831 * @ctx:         A backref sharedness check context.
1832 *
1833 * btrfs_is_data_extent_shared uses the backref walking code but will short
1834 * circuit as soon as it finds a root or inode that doesn't match the
1835 * one passed in. This provides a significant performance benefit for
1836 * callers (such as fiemap) which want to know whether the extent is
1837 * shared but do not need a ref count.
1838 *
1839 * This attempts to attach to the running transaction in order to account for
1840 * delayed refs, but continues on even when no running transaction exists.
1841 *
1842 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1843 */
1844int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1845				u64 extent_gen,
1846				struct btrfs_backref_share_check_ctx *ctx)
1847{
1848	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1849	struct btrfs_root *root = inode->root;
1850	struct btrfs_fs_info *fs_info = root->fs_info;
1851	struct btrfs_trans_handle *trans;
1852	struct ulist_iterator uiter;
1853	struct ulist_node *node;
1854	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1855	int ret = 0;
1856	struct share_check shared = {
1857		.ctx = ctx,
1858		.root = root,
1859		.inum = btrfs_ino(inode),
1860		.data_bytenr = bytenr,
1861		.data_extent_gen = extent_gen,
1862		.share_count = 0,
1863		.self_ref_count = 0,
1864		.have_delayed_delete_refs = false,
1865	};
1866	int level;
1867
1868	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1869		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1870			return ctx->prev_extents_cache[i].is_shared;
1871	}
1872
1873	ulist_init(&ctx->refs);
1874
1875	trans = btrfs_join_transaction_nostart(root);
1876	if (IS_ERR(trans)) {
1877		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1878			ret = PTR_ERR(trans);
1879			goto out;
1880		}
1881		trans = NULL;
1882		down_read(&fs_info->commit_root_sem);
1883	} else {
1884		btrfs_get_tree_mod_seq(fs_info, &elem);
1885		walk_ctx.time_seq = elem.seq;
1886	}
1887
1888	walk_ctx.ignore_extent_item_pos = true;
1889	walk_ctx.trans = trans;
1890	walk_ctx.fs_info = fs_info;
1891	walk_ctx.refs = &ctx->refs;
1892
1893	/* -1 means we are at the bytenr of the data extent itself. */
1894	level = -1;
1895	ULIST_ITER_INIT(&uiter);
1896	ctx->use_path_cache = true;
1897	while (1) {
1898		bool is_shared;
1899		bool cached;
1900
1901		walk_ctx.bytenr = bytenr;
1902		ret = find_parent_nodes(&walk_ctx, &shared);
1903		if (ret == BACKREF_FOUND_SHARED ||
1904		    ret == BACKREF_FOUND_NOT_SHARED) {
1905			/* If shared, we must return 1, otherwise return 0. */
1906			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1907			if (level >= 0)
1908				store_backref_shared_cache(ctx, root, bytenr,
1909							   level, ret == 1);
1910			break;
1911		}
1912		if (ret < 0 && ret != -ENOENT)
1913			break;
1914		ret = 0;
1915
1916		/*
1917		 * If our data extent was not directly shared (without multiple
1918		 * reference items), then it might have a single reference item
1919		 * with a count > 1 for the same offset, which means there are 2
1920		 * (or more) file extent items that point to the data extent -
1921		 * this happens when a file extent item needs to be split and
1922		 * then one item gets moved to another leaf due to a b+tree leaf
1923		 * split when inserting some item. In this case the file extent
1924		 * items may be located in different leaves and therefore some
1925		 * of the leaves may be referenced through shared subtrees while
1926		 * others are not. Since our extent buffer cache only works for
1927		 * a single path (by far the most common case and simpler to
1928		 * deal with), we cannot use it if we have multiple leaves
1929		 * (which implies multiple paths).
1930		 */
1931		if (level == -1 && ctx->refs.nnodes > 1)
1932			ctx->use_path_cache = false;
1933
1934		if (level >= 0)
1935			store_backref_shared_cache(ctx, root, bytenr,
1936						   level, false);
1937		node = ulist_next(&ctx->refs, &uiter);
1938		if (!node)
1939			break;
1940		bytenr = node->val;
1941		level++;
1942		cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
1943						     &is_shared);
1944		if (cached) {
1945			ret = (is_shared ? 1 : 0);
1946			break;
1947		}
1948		shared.share_count = 0;
1949		shared.have_delayed_delete_refs = false;
1950		cond_resched();
1951	}
1952
1953	/*
1954	 * Cache the sharedness result for the data extent if we know our inode
1955	 * has more than 1 file extent item that refers to the data extent.
1956	 */
1957	if (ret >= 0 && shared.self_ref_count > 1) {
1958		int slot = ctx->prev_extents_cache_slot;
1959
1960		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
1961		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
1962
1963		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
1964		ctx->prev_extents_cache_slot = slot;
1965	}
1966
1967	if (trans) {
1968		btrfs_put_tree_mod_seq(fs_info, &elem);
1969		btrfs_end_transaction(trans);
1970	} else {
1971		up_read(&fs_info->commit_root_sem);
1972	}
1973out:
1974	ulist_release(&ctx->refs);
1975	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
1976
1977	return ret;
1978}
1979
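/*
 * Sketch of how a fiemap-style caller might use the check above (hypothetical
 * caller; inode, extent_bytenr and extent_gen are assumed). One share-check
 * context is reused across the extents of an inode so its caches stay warm.
 *
 *	struct btrfs_backref_share_check_ctx *shared_ctx;
 *	int ret;
 *
 *	shared_ctx = btrfs_alloc_backref_share_check_ctx();
 *	if (!shared_ctx)
 *		return -ENOMEM;
 *
 *	ret = btrfs_is_data_extent_shared(inode, extent_bytenr, extent_gen,
 *					  shared_ctx);
 *	if (ret == 1)
 *		pr_debug("extent %llu is shared\n", extent_bytenr);
 *
 *	btrfs_free_backref_share_ctx(shared_ctx);
 *	return ret < 0 ? ret : 0;
 */
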
1980int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1981			  u64 start_off, struct btrfs_path *path,
1982			  struct btrfs_inode_extref **ret_extref,
1983			  u64 *found_off)
1984{
1985	int ret, slot;
1986	struct btrfs_key key;
1987	struct btrfs_key found_key;
1988	struct btrfs_inode_extref *extref;
1989	const struct extent_buffer *leaf;
1990	unsigned long ptr;
1991
1992	key.objectid = inode_objectid;
1993	key.type = BTRFS_INODE_EXTREF_KEY;
1994	key.offset = start_off;
1995
1996	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1997	if (ret < 0)
1998		return ret;
1999
2000	while (1) {
2001		leaf = path->nodes[0];
2002		slot = path->slots[0];
2003		if (slot >= btrfs_header_nritems(leaf)) {
2004			/*
2005			 * If the item at offset is not found,
2006			 * btrfs_search_slot will point us to the slot
2007			 * where it should be inserted. In our case
2008			 * that will be the slot directly before the
2009			 * next BTRFS_INODE_EXTREF_KEY item. In the case
2010			 * that we're pointing to the last slot in a
2011			 * leaf, we must move one leaf over.
2012			 */
2013			ret = btrfs_next_leaf(root, path);
2014			if (ret) {
2015				if (ret >= 1)
2016					ret = -ENOENT;
2017				break;
2018			}
2019			continue;
2020		}
2021
2022		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2023
2024		/*
2025		 * Check that we're still looking at an extended ref key for
2026		 * this particular objectid. If we have a different
2027		 * objectid or type then there are no more to be found
2028		 * in the tree and we can exit.
2029		 */
2030		ret = -ENOENT;
2031		if (found_key.objectid != inode_objectid)
2032			break;
2033		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2034			break;
2035
2036		ret = 0;
2037		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2038		extref = (struct btrfs_inode_extref *)ptr;
2039		*ret_extref = extref;
2040		if (found_off)
2041			*found_off = found_key.offset;
2042		break;
2043	}
2044
2045	return ret;
2046}
2047
2048/*
2049 * This iterates to turn a name (from an iref/extref) into a full filesystem
2050 * path. Elements of the path are separated by '/' and the path is guaranteed
2051 * to be 0-terminated. The path is only given within the current filesystem.
2052 * Therefore, it never starts with a '/'. The caller is responsible for
2053 * providing "size" bytes in "dest". The dest buffer is filled backwards.
2054 * Finally, the start point of the resulting string is returned. This pointer
2055 * is normally within dest.
2056 * In case the path buffer would overflow, the pointer is decremented further
2057 * as if output was written to the buffer, though no more output is actually
2058 * generated. That way, the caller can determine how much space would be
2059 * required for the path to fit into the buffer. In that case, the returned
2060 * value will be smaller than dest. Callers must check this!
2061 */
2062char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2063			u32 name_len, unsigned long name_off,
2064			struct extent_buffer *eb_in, u64 parent,
2065			char *dest, u32 size)
2066{
2067	int slot;
2068	u64 next_inum;
2069	int ret;
2070	s64 bytes_left = ((s64)size) - 1;
2071	struct extent_buffer *eb = eb_in;
2072	struct btrfs_key found_key;
2073	struct btrfs_inode_ref *iref;
2074
2075	if (bytes_left >= 0)
2076		dest[bytes_left] = '\0';
2077
2078	while (1) {
2079		bytes_left -= name_len;
2080		if (bytes_left >= 0)
2081			read_extent_buffer(eb, dest + bytes_left,
2082					   name_off, name_len);
2083		if (eb != eb_in) {
2084			if (!path->skip_locking)
2085				btrfs_tree_read_unlock(eb);
2086			free_extent_buffer(eb);
2087		}
2088		ret = btrfs_find_item(fs_root, path, parent, 0,
2089				BTRFS_INODE_REF_KEY, &found_key);
2090		if (ret > 0)
2091			ret = -ENOENT;
2092		if (ret)
2093			break;
2094
2095		next_inum = found_key.offset;
2096
2097		/* regular exit ahead */
2098		if (parent == next_inum)
2099			break;
2100
2101		slot = path->slots[0];
2102		eb = path->nodes[0];
2103		/* make sure we can use eb after releasing the path */
2104		if (eb != eb_in) {
2105			path->nodes[0] = NULL;
2106			path->locks[0] = 0;
2107		}
2108		btrfs_release_path(path);
2109		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2110
2111		name_len = btrfs_inode_ref_name_len(eb, iref);
2112		name_off = (unsigned long)(iref + 1);
2113
2114		parent = next_inum;
2115		--bytes_left;
2116		if (bytes_left >= 0)
2117			dest[bytes_left] = '/';
2118	}
2119
2120	btrfs_release_path(path);
2121
2122	if (ret)
2123		return ERR_PTR(ret);
2124
2125	return dest + bytes_left;
2126}
2127
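/*
 * The backwards-fill contract above is easy to misuse, so here is a hedged
 * sketch of the overflow check the comment demands (hypothetical caller; all
 * arguments are assumed to come from a prior iref lookup):
 *
 *	char *start;
 *
 *	start = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				  eb, parent, dest, size);
 *	if (IS_ERR(start))
 *		return PTR_ERR(start);
 *
 *	if (start < dest) {
 *		// buffer too small: (dest - start) more bytes were needed
 *		return -ENAMETOOLONG;
 *	}
 *	pr_debug("path: %s\n", start);	// 0-terminated, points into dest
 */
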
2128/*
2129 * This makes the path point to (logical EXTENT_ITEM *). On success it
2130 * returns 0 and stores BTRFS_EXTENT_FLAG_DATA or BTRFS_EXTENT_FLAG_TREE_BLOCK
2131 * in @flags_ret for data extents and tree blocks respectively; <0 on error.
2132 */
2133int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2134			struct btrfs_path *path, struct btrfs_key *found_key,
2135			u64 *flags_ret)
2136{
2137	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2138	int ret;
2139	u64 flags;
2140	u64 size = 0;
2141	u32 item_size;
2142	const struct extent_buffer *eb;
2143	struct btrfs_extent_item *ei;
2144	struct btrfs_key key;
2145
2146	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2147		key.type = BTRFS_METADATA_ITEM_KEY;
2148	else
2149		key.type = BTRFS_EXTENT_ITEM_KEY;
2150	key.objectid = logical;
2151	key.offset = (u64)-1;
2152
2153	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2154	if (ret < 0)
2155		return ret;
2156
2157	ret = btrfs_previous_extent_item(extent_root, path, 0);
2158	if (ret) {
2159		if (ret > 0)
2160			ret = -ENOENT;
2161		return ret;
2162	}
2163	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2164	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2165		size = fs_info->nodesize;
2166	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2167		size = found_key->offset;
2168
2169	if (found_key->objectid > logical ||
2170	    found_key->objectid + size <= logical) {
2171		btrfs_debug(fs_info,
2172			"logical %llu is not within any extent", logical);
2173		return -ENOENT;
2174	}
2175
2176	eb = path->nodes[0];
2177	item_size = btrfs_item_size(eb, path->slots[0]);
2178	BUG_ON(item_size < sizeof(*ei));
2179
2180	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2181	flags = btrfs_extent_flags(eb, ei);
2182
2183	btrfs_debug(fs_info,
2184		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2185		 logical, logical - found_key->objectid, found_key->objectid,
2186		 found_key->offset, flags, item_size);
2187
2188	WARN_ON(!flags_ret);
2189	if (flags_ret) {
2190		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2191			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2192		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2193			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2194		else
2195			BUG();
2196		return 0;
2197	}
2198
2199	return -EIO;
2200}
2201
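/*
 * Illustrative-only sketch of an extent_from_logical() caller (the logical
 * address and fs_info are assumed), mirroring the release-then-inspect
 * pattern used by iterate_inodes_from_logical() further down:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key found_key;
 *	u64 flags = 0;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	btrfs_release_path(path);
 *	if (ret >= 0 && (flags & BTRFS_EXTENT_FLAG_DATA))
 *		pr_debug("data extent at %llu, len %llu\n",
 *			 found_key.objectid, found_key.offset);
 *
 *	btrfs_free_path(path);
 *	return ret < 0 ? ret : 0;
 */
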
2202/*
2203 * Helper function to iterate extent inline refs. @ptr must point to a 0 value
2204 * for the first call and may be modified; it is used to track state.
2205 * If more refs exist, 0 is returned and the next call to
2206 * get_extent_inline_ref must pass the modified ptr parameter to get the
2207 * next ref. After the last ref was processed, 1 is returned.
2208 * Returns <0 on error.
2209 */
2210static int get_extent_inline_ref(unsigned long *ptr,
2211				 const struct extent_buffer *eb,
2212				 const struct btrfs_key *key,
2213				 const struct btrfs_extent_item *ei,
2214				 u32 item_size,
2215				 struct btrfs_extent_inline_ref **out_eiref,
2216				 int *out_type)
2217{
2218	unsigned long end;
2219	u64 flags;
2220	struct btrfs_tree_block_info *info;
2221
2222	if (!*ptr) {
2223		/* first call */
2224		flags = btrfs_extent_flags(eb, ei);
2225		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2226			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2227				/* a skinny metadata extent */
2228				*out_eiref =
2229				     (struct btrfs_extent_inline_ref *)(ei + 1);
2230			} else {
2231				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2232				info = (struct btrfs_tree_block_info *)(ei + 1);
2233				*out_eiref =
2234				   (struct btrfs_extent_inline_ref *)(info + 1);
2235			}
2236		} else {
2237			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2238		}
2239		*ptr = (unsigned long)*out_eiref;
2240		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2241			return -ENOENT;
2242	}
2243
2244	end = (unsigned long)ei + item_size;
2245	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2246	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2247						     BTRFS_REF_TYPE_ANY);
2248	if (*out_type == BTRFS_REF_TYPE_INVALID)
2249		return -EUCLEAN;
2250
2251	*ptr += btrfs_extent_inline_ref_size(*out_type);
2252	WARN_ON(*ptr > end);
2253	if (*ptr == end)
2254		return 1; /* last */
2255
2256	return 0;
2257}
2258
2259/*
2260 * Reads the tree block backref for an extent. Tree level and root are
2261 * returned through out_level and out_root. @ptr must point to a 0 value for
2262 * the first call and may be modified (see the get_extent_inline_ref comment).
2263 * Returns 0 if data was provided, 1 if there was no more data to provide or
2264 * <0 on error.
2265 */
2266int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2267			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2268			    u32 item_size, u64 *out_root, u8 *out_level)
2269{
2270	int ret;
2271	int type;
2272	struct btrfs_extent_inline_ref *eiref;
2273
2274	if (*ptr == (unsigned long)-1)
2275		return 1;
2276
2277	while (1) {
2278		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2279					      &eiref, &type);
2280		if (ret < 0)
2281			return ret;
2282
2283		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2284		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2285			break;
2286
2287		if (ret == 1)
2288			return 1;
2289	}
2290
2291	/* we can treat both ref types equally here */
2292	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2293
2294	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2295		struct btrfs_tree_block_info *info;
2296
2297		info = (struct btrfs_tree_block_info *)(ei + 1);
2298		*out_level = btrfs_tree_block_level(eb, info);
2299	} else {
2300		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2301		*out_level = (u8)key->offset;
2302	}
2303
2304	if (ret == 1)
2305		*ptr = (unsigned long)-1;
2306
2307	return 0;
2308}
2309
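/*
 * A minimal loop sketch over tree_backref_for_extent() (hypothetical caller;
 * eb, key, ei and item_size are assumed to come from a previous
 * extent_from_logical() lookup on a tree block):
 *
 *	unsigned long ptr = 0;	// must start at 0, tracks iteration state
 *	u64 root;
 *	u8 level;
 *	int ret;
 *
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *					      &root, &level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 0)
 *			pr_debug("referenced by root %llu at level %d\n",
 *				 root, level);
 *	} while (ret == 0);	// ret == 1 means all backrefs were consumed
 */
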
2310static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2311			     struct extent_inode_elem *inode_list,
2312			     u64 root, u64 extent_item_objectid,
2313			     iterate_extent_inodes_t *iterate, void *ctx)
2314{
2315	struct extent_inode_elem *eie;
2316	int ret = 0;
2317
2318	for (eie = inode_list; eie; eie = eie->next) {
2319		btrfs_debug(fs_info,
2320			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2321			    extent_item_objectid, eie->inum,
2322			    eie->offset, root);
2323		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2324		if (ret) {
2325			btrfs_debug(fs_info,
2326				    "stopping iteration for %llu due to ret=%d",
2327				    extent_item_objectid, ret);
2328			break;
2329		}
2330	}
2331
2332	return ret;
2333}
2334
2335/*
2336 * Calls iterate() for every inode that references the extent identified by
2337 * the given parameters.
2338 * When the iterator function returns a non-zero value, iteration stops.
2339 */
2340int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2341			  bool search_commit_root,
2342			  iterate_extent_inodes_t *iterate, void *user_ctx)
2343{
2344	int ret;
2345	struct ulist *refs;
2346	struct ulist_node *ref_node;
2347	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2348	struct ulist_iterator ref_uiter;
2349
2350	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2351		    ctx->bytenr);
2352
2353	ASSERT(ctx->trans == NULL);
2354	ASSERT(ctx->roots == NULL);
2355
2356	if (!search_commit_root) {
2357		struct btrfs_trans_handle *trans;
2358
2359		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2360		if (IS_ERR(trans)) {
2361			if (PTR_ERR(trans) != -ENOENT &&
2362			    PTR_ERR(trans) != -EROFS)
2363				return PTR_ERR(trans);
2364			trans = NULL;
2365		}
2366		ctx->trans = trans;
2367	}
2368
2369	if (ctx->trans) {
2370		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2371		ctx->time_seq = seq_elem.seq;
2372	} else {
2373		down_read(&ctx->fs_info->commit_root_sem);
2374	}
2375
2376	ret = btrfs_find_all_leafs(ctx);
2377	if (ret)
2378		goto out;
2379	refs = ctx->refs;
2380	ctx->refs = NULL;
2381
2382	ULIST_ITER_INIT(&ref_uiter);
2383	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2384		const u64 leaf_bytenr = ref_node->val;
2385		struct ulist_node *root_node;
2386		struct ulist_iterator root_uiter;
2387		struct extent_inode_elem *inode_list;
2388
2389		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2390
2391		if (ctx->cache_lookup) {
2392			const u64 *root_ids;
2393			int root_count;
2394			bool cached;
2395
2396			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2397						   &root_ids, &root_count);
2398			if (cached) {
2399				for (int i = 0; i < root_count; i++) {
2400					ret = iterate_leaf_refs(ctx->fs_info,
2401								inode_list,
2402								root_ids[i],
2403								leaf_bytenr,
2404								iterate,
2405								user_ctx);
2406					if (ret)
2407						break;
2408				}
2409				continue;
2410			}
2411		}
2412
2413		if (!ctx->roots) {
2414			ctx->roots = ulist_alloc(GFP_NOFS);
2415			if (!ctx->roots) {
2416				ret = -ENOMEM;
2417				break;
2418			}
2419		}
2420
2421		ctx->bytenr = leaf_bytenr;
2422		ret = btrfs_find_all_roots_safe(ctx);
2423		if (ret)
2424			break;
2425
2426		if (ctx->cache_store)
2427			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2428
2429		ULIST_ITER_INIT(&root_uiter);
2430		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2431			btrfs_debug(ctx->fs_info,
2432				    "root %llu references leaf %llu, data list %#llx",
2433				    root_node->val, ref_node->val,
2434				    ref_node->aux);
2435			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2436						root_node->val, ctx->bytenr,
2437						iterate, user_ctx);
2438		}
2439		ulist_reinit(ctx->roots);
2440	}
2441
2442	free_leaf_list(refs);
2443out:
2444	if (ctx->trans) {
2445		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2446		btrfs_end_transaction(ctx->trans);
2447		ctx->trans = NULL;
2448	} else {
2449		up_read(&ctx->fs_info->commit_root_sem);
2450	}
2451
2452	ulist_free(ctx->roots);
2453	ctx->roots = NULL;
2454
2455	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2456		ret = 0;
2457
2458	return ret;
2459}
2460
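/*
 * A hedged sketch of an iterate_extent_inodes() user (hypothetical; the
 * callback name and locals are made up). The callback has the
 * iterate_extent_inodes_t signature, as build_ino_list() below does:
 *
 *	static int log_extent_inode(u64 inum, u64 offset, u64 num_bytes,
 *				    u64 root, void *user_ctx)
 *	{
 *		pr_debug("inode %llu offset %llu root %llu\n",
 *			 inum, offset, root);
 *		return 0;	// returning non-zero would stop the iteration
 *	}
 *
 * and at the call site:
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	int ret;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = extent_bytenr;
 *	walk_ctx.extent_item_pos = logical - extent_bytenr;
 *
 *	ret = iterate_extent_inodes(&walk_ctx, false, log_extent_inode, NULL);
 */
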
2461static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2462{
2463	struct btrfs_data_container *inodes = ctx;
2464	const size_t c = 3 * sizeof(u64);
2465
2466	if (inodes->bytes_left >= c) {
2467		inodes->bytes_left -= c;
2468		inodes->val[inodes->elem_cnt] = inum;
2469		inodes->val[inodes->elem_cnt + 1] = offset;
2470		inodes->val[inodes->elem_cnt + 2] = root;
2471		inodes->elem_cnt += 3;
2472	} else {
2473		inodes->bytes_missing += c - inodes->bytes_left;
2474		inodes->bytes_left = 0;
2475		inodes->elem_missed += 3;
2476	}
2477
2478	return 0;
2479}
2480
2481int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2482				struct btrfs_path *path,
2483				void *ctx, bool ignore_offset)
2484{
2485	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2486	int ret;
2487	u64 flags = 0;
2488	struct btrfs_key found_key;
2489	int search_commit_root = path->search_commit_root;
2490
2491	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2492	btrfs_release_path(path);
2493	if (ret < 0)
2494		return ret;
2495	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2496		return -EINVAL;
2497
2498	walk_ctx.bytenr = found_key.objectid;
2499	if (ignore_offset)
2500		walk_ctx.ignore_extent_item_pos = true;
2501	else
2502		walk_ctx.extent_item_pos = logical - found_key.objectid;
2503	walk_ctx.fs_info = fs_info;
2504
2505	return iterate_extent_inodes(&walk_ctx, search_commit_root,
2506				     build_ino_list, ctx);
2507}
2508
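/*
 * Hedged sketch of how a LOGICAL_INO style caller combines the function above
 * with init_data_container(), defined further down (hypothetical caller;
 * logical and fs_info are assumed, the 64K size is arbitrary):
 *
 *	struct btrfs_data_container *inodes;
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	inodes = init_data_container(64 * 1024);
 *	if (IS_ERR(inodes))
 *		return PTR_ERR(inodes);
 *
 *	path = btrfs_alloc_path();
 *	if (!path) {
 *		kvfree(inodes);
 *		return -ENOMEM;
 *	}
 *
 *	ret = iterate_inodes_from_logical(logical, fs_info, path, inodes, false);
 *	btrfs_free_path(path);
 *	// inodes->val[] now holds (inum, offset, root) triples, see build_ino_list()
 *	kvfree(inodes);
 *	return ret;
 */
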
2509static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2510			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2511
2512static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2513{
2514	int ret = 0;
2515	int slot;
2516	u32 cur;
2517	u32 len;
2518	u32 name_len;
2519	u64 parent = 0;
2520	int found = 0;
2521	struct btrfs_root *fs_root = ipath->fs_root;
2522	struct btrfs_path *path = ipath->btrfs_path;
2523	struct extent_buffer *eb;
2524	struct btrfs_inode_ref *iref;
2525	struct btrfs_key found_key;
2526
2527	while (!ret) {
2528		ret = btrfs_find_item(fs_root, path, inum,
2529				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2530				&found_key);
2531
2532		if (ret < 0)
2533			break;
2534		if (ret) {
2535			ret = found ? 0 : -ENOENT;
2536			break;
2537		}
2538		++found;
2539
2540		parent = found_key.offset;
2541		slot = path->slots[0];
2542		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2543		if (!eb) {
2544			ret = -ENOMEM;
2545			break;
2546		}
2547		btrfs_release_path(path);
2548
2549		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2550
2551		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2552			name_len = btrfs_inode_ref_name_len(eb, iref);
2553			/* path must be released before calling iterate()! */
2554			btrfs_debug(fs_root->fs_info,
2555				"following ref at offset %u for inode %llu in tree %llu",
2556				cur, found_key.objectid,
2557				fs_root->root_key.objectid);
2558			ret = inode_to_path(parent, name_len,
2559				      (unsigned long)(iref + 1), eb, ipath);
2560			if (ret)
2561				break;
2562			len = sizeof(*iref) + name_len;
2563			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2564		}
2565		free_extent_buffer(eb);
2566	}
2567
2568	btrfs_release_path(path);
2569
2570	return ret;
2571}
2572
2573static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2574{
2575	int ret;
2576	int slot;
2577	u64 offset = 0;
2578	u64 parent;
2579	int found = 0;
2580	struct btrfs_root *fs_root = ipath->fs_root;
2581	struct btrfs_path *path = ipath->btrfs_path;
2582	struct extent_buffer *eb;
2583	struct btrfs_inode_extref *extref;
2584	u32 item_size;
2585	u32 cur_offset;
2586	unsigned long ptr;
2587
2588	while (1) {
2589		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2590					    &offset);
2591		if (ret < 0)
2592			break;
2593		if (ret) {
2594			ret = found ? 0 : -ENOENT;
2595			break;
2596		}
2597		++found;
2598
2599		slot = path->slots[0];
2600		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2601		if (!eb) {
2602			ret = -ENOMEM;
2603			break;
2604		}
2605		btrfs_release_path(path);
2606
2607		item_size = btrfs_item_size(eb, slot);
2608		ptr = btrfs_item_ptr_offset(eb, slot);
2609		cur_offset = 0;
2610
2611		while (cur_offset < item_size) {
2612			u32 name_len;
2613
2614			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2615			parent = btrfs_inode_extref_parent(eb, extref);
2616			name_len = btrfs_inode_extref_name_len(eb, extref);
2617			ret = inode_to_path(parent, name_len,
2618				      (unsigned long)&extref->name, eb, ipath);
2619			if (ret)
2620				break;
2621
2622			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2623			cur_offset += sizeof(*extref);
2624		}
2625		free_extent_buffer(eb);
2626
2627		offset++;
2628	}
2629
2630	btrfs_release_path(path);
2631
2632	return ret;
2633}
2634
2635/*
2636 * Returns 0 if the path could be dumped (possibly truncated).
2637 * Returns <0 in case of an error.
2638 */
2639static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2640			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2641{
2642	char *fspath;
2643	char *fspath_min;
2644	int i = ipath->fspath->elem_cnt;
2645	const int s_ptr = sizeof(char *);
2646	u32 bytes_left;
2647
2648	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2649					ipath->fspath->bytes_left - s_ptr : 0;
2650
2651	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2652	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2653				   name_off, eb, inum, fspath_min, bytes_left);
2654	if (IS_ERR(fspath))
2655		return PTR_ERR(fspath);
2656
2657	if (fspath > fspath_min) {
2658		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2659		++ipath->fspath->elem_cnt;
2660		ipath->fspath->bytes_left = fspath - fspath_min;
2661	} else {
2662		++ipath->fspath->elem_missed;
2663		ipath->fspath->bytes_missing += fspath_min - fspath;
2664		ipath->fspath->bytes_left = 0;
2665	}
2666
2667	return 0;
2668}
2669
2670/*
2671 * This dumps all filesystem paths to the inode into the ipath struct, provided
2672 * it has been created large enough. Each path is zero-terminated and accessed
2673 * from ipath->fspath->val[i].
2674 * When it returns, there are ipath->fspath->elem_cnt number of paths available
2675 * in ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2676 * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise,
2677 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2678 * have been needed to return all paths.
2679 */
2680int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2681{
2682	int ret;
2683	int found_refs = 0;
2684
2685	ret = iterate_inode_refs(inum, ipath);
2686	if (!ret)
2687		++found_refs;
2688	else if (ret != -ENOENT)
2689		return ret;
2690
2691	ret = iterate_inode_extrefs(inum, ipath);
2692	if (ret == -ENOENT && found_refs)
2693		return 0;
2694
2695	return ret;
2696}
2697
2698struct btrfs_data_container *init_data_container(u32 total_bytes)
2699{
2700	struct btrfs_data_container *data;
2701	size_t alloc_bytes;
2702
2703	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2704	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2705	if (!data)
2706		return ERR_PTR(-ENOMEM);
2707
2708	if (total_bytes >= sizeof(*data)) {
2709		data->bytes_left = total_bytes - sizeof(*data);
2710		data->bytes_missing = 0;
2711	} else {
2712		data->bytes_missing = sizeof(*data) - total_bytes;
2713		data->bytes_left = 0;
2714	}
2715
2716	data->elem_cnt = 0;
2717	data->elem_missed = 0;
2718
2719	return data;
2720}
2721
2722/*
2723 * Allocates space to return multiple filesystem paths for an inode.
2724 * total_bytes to allocate are passed; note that space usable for actual path
2725 * information will be total_bytes - sizeof(struct btrfs_data_container).
2726 * The returned pointer must be freed with free_ipath() in the end.
2727 */
2728struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2729					struct btrfs_path *path)
2730{
2731	struct inode_fs_paths *ifp;
2732	struct btrfs_data_container *fspath;
2733
2734	fspath = init_data_container(total_bytes);
2735	if (IS_ERR(fspath))
2736		return ERR_CAST(fspath);
2737
2738	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2739	if (!ifp) {
2740		kvfree(fspath);
2741		return ERR_PTR(-ENOMEM);
2742	}
2743
2744	ifp->btrfs_path = path;
2745	ifp->fspath = fspath;
2746	ifp->fs_root = fs_root;
2747
2748	return ifp;
2749}
2750
2751void free_ipath(struct inode_fs_paths *ipath)
2752{
2753	if (!ipath)
2754		return;
2755	kvfree(ipath->fspath);
2756	kfree(ipath);
2757}
2758
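/*
 * The ipath helpers above are meant to be used together. A hedged sketch
 * (hypothetical caller; fs_root, inum and the 4K size are assumed):
 *
 *	struct btrfs_path *path;
 *	struct inode_fs_paths *ipath;
 *	u32 i;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *
 *	ret = paths_from_inode(inum, ipath);
 *	if (!ret)
 *		for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *			pr_debug("path: %s\n",
 *				 (char *)(unsigned long)ipath->fspath->val[i]);
 *
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 *	return ret;
 */
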
2759struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2760{
2761	struct btrfs_backref_iter *ret;
2762
2763	ret = kzalloc(sizeof(*ret), GFP_NOFS);
2764	if (!ret)
2765		return NULL;
2766
2767	ret->path = btrfs_alloc_path();
2768	if (!ret->path) {
2769		kfree(ret);
2770		return NULL;
2771	}
2772
2773	/* Current backref iterator only supports iteration in commit root */
2774	ret->path->search_commit_root = 1;
2775	ret->path->skip_locking = 1;
2776	ret->fs_info = fs_info;
2777
2778	return ret;
2779}
2780
2781int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2782{
2783	struct btrfs_fs_info *fs_info = iter->fs_info;
2784	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2785	struct btrfs_path *path = iter->path;
2786	struct btrfs_extent_item *ei;
2787	struct btrfs_key key;
2788	int ret;
2789
2790	key.objectid = bytenr;
2791	key.type = BTRFS_METADATA_ITEM_KEY;
2792	key.offset = (u64)-1;
2793	iter->bytenr = bytenr;
2794
2795	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2796	if (ret < 0)
2797		return ret;
2798	if (ret == 0) {
2799		ret = -EUCLEAN;
2800		goto release;
2801	}
2802	if (path->slots[0] == 0) {
2803		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2804		ret = -EUCLEAN;
2805		goto release;
2806	}
2807	path->slots[0]--;
2808
2809	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2810	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2811	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2812		ret = -ENOENT;
2813		goto release;
2814	}
2815	memcpy(&iter->cur_key, &key, sizeof(key));
2816	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2817						    path->slots[0]);
2818	iter->end_ptr = (u32)(iter->item_ptr +
2819			btrfs_item_size(path->nodes[0], path->slots[0]));
2820	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2821			    struct btrfs_extent_item);
2822
2823	/*
2824	 * Only iteration of tree backrefs is supported for now.
2825	 *
2826	 * This is an extra precaution for non-skinny-metadata, where
2827	 * EXTENT_ITEM is also used for tree blocks, so we can only use
2828	 * extent flags to determine if it's a tree block.
2829	 */
2830	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2831		ret = -ENOTSUPP;
2832		goto release;
2833	}
2834	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2835
2836	/* If there is no inline backref, go search for keyed backref */
2837	if (iter->cur_ptr >= iter->end_ptr) {
2838		ret = btrfs_next_item(extent_root, path);
2839
2840		/* Neither inline nor keyed ref */
2841		if (ret > 0) {
2842			ret = -ENOENT;
2843			goto release;
2844		}
2845		if (ret < 0)
2846			goto release;
2847
2848		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2849				path->slots[0]);
2850		if (iter->cur_key.objectid != bytenr ||
2851		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2852		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2853			ret = -ENOENT;
2854			goto release;
2855		}
2856		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2857							   path->slots[0]);
2858		iter->item_ptr = iter->cur_ptr;
2859		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2860				      path->nodes[0], path->slots[0]));
2861	}
2862
2863	return 0;
2864release:
2865	btrfs_backref_iter_release(iter);
2866	return ret;
2867}
2868
2869/*
2870 * Go to the next backref item of the current bytenr, which can be either
2871 * inlined or keyed.
2872 *
2873 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
2874 *
2875 * Return 0 if we get the next backref without problem.
2876 * Return >0 if there is no extra backref for this bytenr.
2877 * Return <0 if something went wrong.
2878 */
2879int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2880{
2881	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2882	struct btrfs_root *extent_root;
2883	struct btrfs_path *path = iter->path;
2884	struct btrfs_extent_inline_ref *iref;
2885	int ret;
2886	u32 size;
2887
2888	if (btrfs_backref_iter_is_inline_ref(iter)) {
2889		/* We're still inside the inline refs */
2890		ASSERT(iter->cur_ptr < iter->end_ptr);
2891
2892		if (btrfs_backref_has_tree_block_info(iter)) {
2893			/* First tree block info */
2894			size = sizeof(struct btrfs_tree_block_info);
2895		} else {
2896			/* Use inline ref type to determine the size */
2897			int type;
2898
2899			iref = (struct btrfs_extent_inline_ref *)
2900				((unsigned long)iter->cur_ptr);
2901			type = btrfs_extent_inline_ref_type(eb, iref);
2902
2903			size = btrfs_extent_inline_ref_size(type);
2904		}
2905		iter->cur_ptr += size;
2906		if (iter->cur_ptr < iter->end_ptr)
2907			return 0;
2908
2909		/* All inline items iterated, fall through */
2910	}
2911
2912	/* We're at keyed items, there is no inline item, go to the next one */
2913	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2914	ret = btrfs_next_item(extent_root, iter->path);
2915	if (ret)
2916		return ret;
2917
2918	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2919	if (iter->cur_key.objectid != iter->bytenr ||
2920	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2921	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2922		return 1;
2923	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2924					path->slots[0]);
2925	iter->cur_ptr = iter->item_ptr;
2926	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2927						path->slots[0]);
2928	return 0;
2929}
2930
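/*
 * Putting btrfs_backref_iter_start() and btrfs_backref_iter_next() together,
 * a hedged iteration sketch over all tree backrefs of one metadata extent
 * (hypothetical caller; btrfs_backref_iter_free() is assumed to be the
 * matching cleanup helper declared in backref.h):
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info);
 *	if (!iter)
 *		return -ENOMEM;
 *
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		if (btrfs_backref_iter_is_inline_ref(iter))
 *			pr_debug("inline backref, type %d\n",
 *				 iter->cur_key.type);
 *		else
 *			pr_debug("keyed backref, type %d\n",
 *				 iter->cur_key.type);
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 *	return ret < 0 ? ret : 0;	// >0 only means "no more backrefs"
 */
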
2931void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2932			      struct btrfs_backref_cache *cache, int is_reloc)
2933{
2934	int i;
2935
2936	cache->rb_root = RB_ROOT;
2937	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2938		INIT_LIST_HEAD(&cache->pending[i]);
2939	INIT_LIST_HEAD(&cache->changed);
2940	INIT_LIST_HEAD(&cache->detached);
2941	INIT_LIST_HEAD(&cache->leaves);
2942	INIT_LIST_HEAD(&cache->pending_edge);
2943	INIT_LIST_HEAD(&cache->useless_node);
2944	cache->fs_info = fs_info;
2945	cache->is_reloc = is_reloc;
2946}
2947
2948struct btrfs_backref_node *btrfs_backref_alloc_node(
2949		struct btrfs_backref_cache *cache, u64 bytenr, int level)
2950{
2951	struct btrfs_backref_node *node;
2952
2953	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2954	node = kzalloc(sizeof(*node), GFP_NOFS);
2955	if (!node)
2956		return node;
2957
2958	INIT_LIST_HEAD(&node->list);
2959	INIT_LIST_HEAD(&node->upper);
2960	INIT_LIST_HEAD(&node->lower);
2961	RB_CLEAR_NODE(&node->rb_node);
2962	cache->nr_nodes++;
2963	node->level = level;
2964	node->bytenr = bytenr;
2965
2966	return node;
2967}
2968
2969struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2970		struct btrfs_backref_cache *cache)
2971{
2972	struct btrfs_backref_edge *edge;
2973
2974	edge = kzalloc(sizeof(*edge), GFP_NOFS);
2975	if (edge)
2976		cache->nr_edges++;
2977	return edge;
2978}
2979
2980/*
2981 * Drop the backref node from cache, also cleaning up all its
2982 * upper edges and any uncached nodes in the path.
2983 *
2984 * This cleanup happens bottom up, thus the node should either
2985 * be the lowest node in the cache or a detached node.
2986 */
2987void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2988				struct btrfs_backref_node *node)
2989{
2990	struct btrfs_backref_node *upper;
2991	struct btrfs_backref_edge *edge;
2992
2993	if (!node)
2994		return;
2995
2996	BUG_ON(!node->lowest && !node->detached);
2997	while (!list_empty(&node->upper)) {
2998		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2999				  list[LOWER]);
3000		upper = edge->node[UPPER];
3001		list_del(&edge->list[LOWER]);
3002		list_del(&edge->list[UPPER]);
3003		btrfs_backref_free_edge(cache, edge);
3004
3005		/*
3006		 * Add the node to the leaf node list if no other child block
3007		 * is cached.
3008		 */
3009		if (list_empty(&upper->lower)) {
3010			list_add_tail(&upper->lower, &cache->leaves);
3011			upper->lowest = 1;
3012		}
3013	}
3014
3015	btrfs_backref_drop_node(cache, node);
3016}
3017
3018/*
3019 * Release all nodes/edges from current cache
3020 */
3021void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3022{
3023	struct btrfs_backref_node *node;
3024	int i;
3025
3026	while (!list_empty(&cache->detached)) {
3027		node = list_entry(cache->detached.next,
3028				  struct btrfs_backref_node, list);
3029		btrfs_backref_cleanup_node(cache, node);
3030	}
3031
3032	while (!list_empty(&cache->leaves)) {
3033		node = list_entry(cache->leaves.next,
3034				  struct btrfs_backref_node, lower);
3035		btrfs_backref_cleanup_node(cache, node);
3036	}
3037
3038	cache->last_trans = 0;
3039
3040	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3041		ASSERT(list_empty(&cache->pending[i]));
3042	ASSERT(list_empty(&cache->pending_edge));
3043	ASSERT(list_empty(&cache->useless_node));
3044	ASSERT(list_empty(&cache->changed));
3045	ASSERT(list_empty(&cache->detached));
3046	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3047	ASSERT(!cache->nr_nodes);
3048	ASSERT(!cache->nr_edges);
3049}
3050
3051/*
3052 * Handle direct tree backref
3053 *
3054 * A direct tree backref means the backref item shows its parent bytenr
3055 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
3056 *
3057 * @ref_key:	The converted backref key.
3058 *		For keyed backref, it's the item key.
3059 *		For inlined backref, objectid is the bytenr,
3060 *		type is btrfs_inline_ref_type, offset is
3061 *		btrfs_inline_ref_offset.
3062 */
3063static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3064				      struct btrfs_key *ref_key,
3065				      struct btrfs_backref_node *cur)
3066{
3067	struct btrfs_backref_edge *edge;
3068	struct btrfs_backref_node *upper;
3069	struct rb_node *rb_node;
3070
3071	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3072
3073	/* Only reloc root uses backref pointing to itself */
3074	if (ref_key->objectid == ref_key->offset) {
3075		struct btrfs_root *root;
3076
3077		cur->is_reloc_root = 1;
3078		/* Only reloc backref cache cares about a specific root */
3079		if (cache->is_reloc) {
3080			root = find_reloc_root(cache->fs_info, cur->bytenr);
3081			if (!root)
3082				return -ENOENT;
3083			cur->root = root;
3084		} else {
3085			/*
3086			 * For generic purpose backref cache, reloc root node
3087			 * is useless.
3088			 */
3089			list_add(&cur->list, &cache->useless_node);
3090		}
3091		return 0;
3092	}
3093
3094	edge = btrfs_backref_alloc_edge(cache);
3095	if (!edge)
3096		return -ENOMEM;
3097
3098	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3099	if (!rb_node) {
3100		/* Parent node not yet cached */
3101		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3102					   cur->level + 1);
3103		if (!upper) {
3104			btrfs_backref_free_edge(cache, edge);
3105			return -ENOMEM;
3106		}
3107
3108		/*
3109		 * Backrefs for the upper level block aren't cached, add the
3110		 * block to the pending list
3111		 */
3112		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3113	} else {
3114		/* Parent node already cached */
3115		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3116		ASSERT(upper->checked);
3117		INIT_LIST_HEAD(&edge->list[UPPER]);
3118	}
3119	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3120	return 0;
3121}
3122
3123/*
3124 * Handle indirect tree backref
3125 *
3126 * An indirect tree backref means we only know which tree the node belongs to.
3127 * We still need to do a tree search to find out the parents. This is for
3128 * TREE_BLOCK_REF backref (keyed or inlined).
3129 *
3130 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
3131 * @tree_key:	The first key of this tree block.
3132 * @path:	A clean (released) path, to avoid allocating a path every time
3133 *		the function gets called.
3134 */
3135static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
3136					struct btrfs_path *path,
3137					struct btrfs_key *ref_key,
3138					struct btrfs_key *tree_key,
3139					struct btrfs_backref_node *cur)
3140{
3141	struct btrfs_fs_info *fs_info = cache->fs_info;
3142	struct btrfs_backref_node *upper;
3143	struct btrfs_backref_node *lower;
3144	struct btrfs_backref_edge *edge;
3145	struct extent_buffer *eb;
3146	struct btrfs_root *root;
3147	struct rb_node *rb_node;
3148	int level;
3149	bool need_check = true;
3150	int ret;
3151
3152	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3153	if (IS_ERR(root))
3154		return PTR_ERR(root);
3155	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3156		cur->cowonly = 1;
3157
3158	if (btrfs_root_level(&root->root_item) == cur->level) {
3159		/* Tree root */
3160		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3161		/*
3162		 * For reloc backref cache, we may ignore reloc root.  But for
3163		 * general purpose backref cache, we can't rely on
3164		 * btrfs_should_ignore_reloc_root() as it may conflict with
3165		 * current running relocation and lead to missing root.
3166		 *
3167		 * For general purpose backref cache, reloc root detection is
3168		 * completely relying on direct backref (key->offset is parent
3169		 * bytenr), thus only do such check for reloc cache.
3170		 */
3171		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3172			btrfs_put_root(root);
3173			list_add(&cur->list, &cache->useless_node);
3174		} else {
3175			cur->root = root;
3176		}
3177		return 0;
3178	}
3179
3180	level = cur->level + 1;
3181
3182	/* Search the tree to find parent blocks referring to the block */
3183	path->search_commit_root = 1;
3184	path->skip_locking = 1;
3185	path->lowest_level = level;
3186	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3187	path->lowest_level = 0;
3188	if (ret < 0) {
3189		btrfs_put_root(root);
3190		return ret;
3191	}
3192	if (ret > 0 && path->slots[level] > 0)
3193		path->slots[level]--;
3194
3195	eb = path->nodes[level];
3196	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3197		btrfs_err(fs_info,
3198"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3199			  cur->bytenr, level - 1, root->root_key.objectid,
3200			  tree_key->objectid, tree_key->type, tree_key->offset);
3201		btrfs_put_root(root);
3202		ret = -ENOENT;
3203		goto out;
3204	}
3205	lower = cur;
3206
3207	/* Add all nodes and edges in the path */
3208	for (; level < BTRFS_MAX_LEVEL; level++) {
3209		if (!path->nodes[level]) {
3210			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3211			       lower->bytenr);
3212			/* Same as previous should_ignore_reloc_root() call */
3213			if (btrfs_should_ignore_reloc_root(root) &&
3214			    cache->is_reloc) {
3215				btrfs_put_root(root);
3216				list_add(&lower->list, &cache->useless_node);
3217			} else {
3218				lower->root = root;
3219			}
3220			break;
3221		}
3222
3223		edge = btrfs_backref_alloc_edge(cache);
3224		if (!edge) {
3225			btrfs_put_root(root);
3226			ret = -ENOMEM;
3227			goto out;
3228		}
3229
3230		eb = path->nodes[level];
3231		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3232		if (!rb_node) {
3233			upper = btrfs_backref_alloc_node(cache, eb->start,
3234							 lower->level + 1);
3235			if (!upper) {
3236				btrfs_put_root(root);
3237				btrfs_backref_free_edge(cache, edge);
3238				ret = -ENOMEM;
3239				goto out;
3240			}
3241			upper->owner = btrfs_header_owner(eb);
3242			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3243				upper->cowonly = 1;
3244
3245			/*
3246			 * If we know the block isn't shared we can avoid
3247			 * checking its backrefs.
3248			 */
3249			if (btrfs_block_can_be_shared(root, eb))
3250				upper->checked = 0;
3251			else
3252				upper->checked = 1;
3253
3254			/*
3255			 * Add the block to the pending list if we need to check its
3256			 * backrefs; we only do this once while walking up a
3257			 * tree as we will catch anything else later on.
3258			 */
3259			if (!upper->checked && need_check) {
3260				need_check = false;
3261				list_add_tail(&edge->list[UPPER],
3262					      &cache->pending_edge);
3263			} else {
3264				if (upper->checked)
3265					need_check = true;
3266				INIT_LIST_HEAD(&edge->list[UPPER]);
3267			}
3268		} else {
3269			upper = rb_entry(rb_node, struct btrfs_backref_node,
3270					 rb_node);
3271			ASSERT(upper->checked);
3272			INIT_LIST_HEAD(&edge->list[UPPER]);
3273			if (!upper->owner)
3274				upper->owner = btrfs_header_owner(eb);
3275		}
3276		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3277
3278		if (rb_node) {
3279			btrfs_put_root(root);
3280			break;
3281		}
3282		lower = upper;
3283		upper = NULL;
3284	}
3285out:
3286	btrfs_release_path(path);
3287	return ret;
3288}
3289
3290/*
3291 * Add backref node @cur into @cache.
3292 *
3293 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3294 *	 links aren't yet bi-directional. Use btrfs_backref_finish_upper_links()
3295 *	 to finish such linkage.
3296 *
3297 * @path:	Released path for indirect tree backref lookup
3298 * @iter:	Released backref iter for extent tree search
3299 * @node_key:	The first key of the tree block
3300 */
3301int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
3302				struct btrfs_path *path,
3303				struct btrfs_backref_iter *iter,
3304				struct btrfs_key *node_key,
3305				struct btrfs_backref_node *cur)
3306{
3307	struct btrfs_fs_info *fs_info = cache->fs_info;
3308	struct btrfs_backref_edge *edge;
3309	struct btrfs_backref_node *exist;
3310	int ret;
3311
3312	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3313	if (ret < 0)
3314		return ret;
3315	/*
3316	 * We skip the first btrfs_tree_block_info, as we don't use the key
3317	 * stored in it, but fetch it from the tree block
3318	 */
3319	if (btrfs_backref_has_tree_block_info(iter)) {
3320		ret = btrfs_backref_iter_next(iter);
3321		if (ret < 0)
3322			goto out;
3323		/* No extra backref? This means the tree block is corrupted */
3324		if (ret > 0) {
3325			ret = -EUCLEAN;
3326			goto out;
3327		}
3328	}
3329	WARN_ON(cur->checked);
3330	if (!list_empty(&cur->upper)) {
3331		/*
3332		 * The backref was added previously when processing backref of
3333		 * type BTRFS_TREE_BLOCK_REF_KEY
3334		 */
3335		ASSERT(list_is_singular(&cur->upper));
3336		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3337				  list[LOWER]);
3338		ASSERT(list_empty(&edge->list[UPPER]));
3339		exist = edge->node[UPPER];
3340		/*
3341		 * Add the upper level block to the pending list if we need to check
3342		 * its backrefs
3343		 */
3344		if (!exist->checked)
3345			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3346	} else {
3347		exist = NULL;
3348	}
3349
3350	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3351		struct extent_buffer *eb;
3352		struct btrfs_key key;
3353		int type;
3354
3355		cond_resched();
3356		eb = btrfs_backref_get_eb(iter);
3357
3358		key.objectid = iter->bytenr;
3359		if (btrfs_backref_iter_is_inline_ref(iter)) {
3360			struct btrfs_extent_inline_ref *iref;
3361
3362			/* Update key for inline backref */
3363			iref = (struct btrfs_extent_inline_ref *)
3364				((unsigned long)iter->cur_ptr);
3365			type = btrfs_get_extent_inline_ref_type(eb, iref,
3366							BTRFS_REF_TYPE_BLOCK);
3367			if (type == BTRFS_REF_TYPE_INVALID) {
3368				ret = -EUCLEAN;
3369				goto out;
3370			}
3371			key.type = type;
3372			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3373		} else {
3374			key.type = iter->cur_key.type;
3375			key.offset = iter->cur_key.offset;
3376		}
3377
3378		/*
3379		 * Parent node found and matches current inline ref, no need to
3380		 * rebuild this node for this inline ref
3381		 */
3382		if (exist &&
3383		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3384		      exist->owner == key.offset) ||
3385		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3386		      exist->bytenr == key.offset))) {
3387			exist = NULL;
3388			continue;
3389		}
3390
3391		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3392		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3393			ret = handle_direct_tree_backref(cache, &key, cur);
3394			if (ret < 0)
3395				goto out;
3396			continue;
3397		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3398			ret = -EINVAL;
3399			btrfs_print_v0_err(fs_info);
3400			btrfs_handle_fs_error(fs_info, ret, NULL);
3401			goto out;
3402		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3403			continue;
3404		}
3405
3406		/*
3407		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3408		 * means the root objectid. We need to search the tree to get
3409		 * its parent bytenr.
3410		 */
3411		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3412						   cur);
3413		if (ret < 0)
3414			goto out;
3415	}
3416	ret = 0;
3417	cur->checked = 1;
3418	WARN_ON(exist);
3419out:
3420	btrfs_backref_iter_release(iter);
3421	return ret;
3422}
3423
3424/*
3425 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3426 */
3427int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3428				     struct btrfs_backref_node *start)
3429{
3430	struct list_head *useless_node = &cache->useless_node;
3431	struct btrfs_backref_edge *edge;
3432	struct rb_node *rb_node;
3433	LIST_HEAD(pending_edge);
3434
3435	ASSERT(start->checked);
3436
3437	/* Insert this node to cache if it's not COW-only */
3438	if (!start->cowonly) {
3439		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3440					   &start->rb_node);
3441		if (rb_node)
3442			btrfs_backref_panic(cache->fs_info, start->bytenr,
3443					    -EEXIST);
3444		list_add_tail(&start->lower, &cache->leaves);
3445	}
3446
3447	/*
3448	 * Use breadth first search to iterate all related edges.
3449	 *
3450	 * The starting points are all the edges of this node
3451	 */
3452	list_for_each_entry(edge, &start->upper, list[LOWER])
3453		list_add_tail(&edge->list[UPPER], &pending_edge);
3454
3455	while (!list_empty(&pending_edge)) {
3456		struct btrfs_backref_node *upper;
3457		struct btrfs_backref_node *lower;
3458
3459		edge = list_first_entry(&pending_edge,
3460				struct btrfs_backref_edge, list[UPPER]);
3461		list_del_init(&edge->list[UPPER]);
3462		upper = edge->node[UPPER];
3463		lower = edge->node[LOWER];
3464
3465		/* Parent is detached, no need to keep any edges */
3466		if (upper->detached) {
3467			list_del(&edge->list[LOWER]);
3468			btrfs_backref_free_edge(cache, edge);
3469
3470			/* Lower node is orphan, queue for cleanup */
3471			if (list_empty(&lower->upper))
3472				list_add(&lower->list, useless_node);
3473			continue;
3474		}
3475
3476		/*
3477		 * All new nodes added in current build_backref_tree() haven't
3478		 * been linked to the cache rb tree.
3479		 * So if we have upper->rb_node populated, this means a cache
3480		 * hit. We only need to link the edge, as @upper and all its
3481		 * parents have already been linked.
3482		 */
3483		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3484			if (upper->lowest) {
3485				list_del_init(&upper->lower);
3486				upper->lowest = 0;
3487			}
3488
3489			list_add_tail(&edge->list[UPPER], &upper->lower);
3490			continue;
3491		}
3492
3493		/* Sanity check, we shouldn't have any unchecked nodes */
3494		if (!upper->checked) {
3495			ASSERT(0);
3496			return -EUCLEAN;
3497		}
3498
3499		/* Sanity check: a node and its parent must agree on COW-only status */
3500		if (start->cowonly != upper->cowonly) {
3501			ASSERT(0);
3502			return -EUCLEAN;
3503		}
3504
3505		/* Only cache non-COW-only (subvolume trees) tree blocks */
3506		if (!upper->cowonly) {
3507			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3508						   &upper->rb_node);
3509			if (rb_node) {
3510				btrfs_backref_panic(cache->fs_info,
3511						upper->bytenr, -EEXIST);
3512				return -EUCLEAN;
3513			}
3514		}
3515
3516		list_add_tail(&edge->list[UPPER], &upper->lower);
3517
3518		/*
3519		 * Also queue all the parent edges of this uncached node
3520		 * to finish the upper linkage
3521		 */
3522		list_for_each_entry(edge, &upper->upper, list[LOWER])
3523			list_add_tail(&edge->list[UPPER], &pending_edge);
3524	}
3525	return 0;
3526}
3527
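/*
 * A hedged sketch of the intended two-stage flow, simplified from what a
 * caller like build_backref_tree() in relocation.c does (the real caller also
 * drains cache->pending_edge, running btrfs_backref_add_tree_node() for each
 * pending edge, before finishing the links):
 *
 *	cur = btrfs_backref_alloc_node(cache, bytenr, level);
 *	if (!cur)
 *		return ERR_PTR(-ENOMEM);
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
 *	if (ret < 0)
 *		goto error;
 *
 *	// only now do cur's upper links become bi-directional
 *	ret = btrfs_backref_finish_upper_links(cache, cur);
 *	if (ret < 0)
 *		goto error;
 *	return cur;
 *
 * error:
 *	btrfs_backref_error_cleanup(cache, cur);
 *	return ERR_PTR(ret);
 */
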
3528void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3529				 struct btrfs_backref_node *node)
3530{
3531	struct btrfs_backref_node *lower;
3532	struct btrfs_backref_node *upper;
3533	struct btrfs_backref_edge *edge;
3534
3535	while (!list_empty(&cache->useless_node)) {
3536		lower = list_first_entry(&cache->useless_node,
3537				   struct btrfs_backref_node, list);
3538		list_del_init(&lower->list);
3539	}
3540	while (!list_empty(&cache->pending_edge)) {
3541		edge = list_first_entry(&cache->pending_edge,
3542				struct btrfs_backref_edge, list[UPPER]);
3543		list_del(&edge->list[UPPER]);
3544		list_del(&edge->list[LOWER]);
3545		lower = edge->node[LOWER];
3546		upper = edge->node[UPPER];
3547		btrfs_backref_free_edge(cache, edge);
3548
3549		/*
3550		 * Lower is no longer linked to any upper backref nodes and
3551		 * isn't in the cache, we can free it ourselves.
3552		 */
3553		if (list_empty(&lower->upper) &&
3554		    RB_EMPTY_NODE(&lower->rb_node))
3555			list_add(&lower->list, &cache->useless_node);
3556
3557		if (!RB_EMPTY_NODE(&upper->rb_node))
3558			continue;
3559
3560		/* Add this guy's upper edges to the list to process */
3561		list_for_each_entry(edge, &upper->upper, list[LOWER])
3562			list_add_tail(&edge->list[UPPER],
3563				      &cache->pending_edge);
3564		if (list_empty(&upper->upper))
3565			list_add(&upper->list, &cache->useless_node);
3566	}
3567
3568	while (!list_empty(&cache->useless_node)) {
3569		lower = list_first_entry(&cache->useless_node,
3570				   struct btrfs_backref_node, list);
3571		list_del_init(&lower->list);
3572		if (lower == node)
3573			node = NULL;
3574		btrfs_backref_drop_node(cache, lower);
3575	}
3576
3577	btrfs_backref_cleanup_node(cache, node);
3578	ASSERT(list_empty(&cache->useless_node) &&
3579	       list_empty(&cache->pending_edge));
3580}